use axum::{
extract::{Path, Query, State},
http::{self, StatusCode},
response::{
sse::{Event, Sse},
Html, IntoResponse, Json,
},
};
use chrono::Utc;
use futures_util::stream::{self, Stream};
use mockforge_core::{Error, Result};
use mockforge_plugin_loader::PluginRegistry;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::collections::HashMap;
use std::convert::Infallible;
use std::process::Command;
use std::process::Stdio;
use std::sync::Arc;
use std::time::Duration;
use sysinfo::System;
use tokio::sync::RwLock;
use crate::models::{
ApiResponse, ConfigUpdate, DashboardData, DashboardSystemInfo, FaultConfig, HealthCheck,
LatencyProfile, LogFilter, MetricsData, ProxyConfig, RequestLog, RouteInfo, ServerInfo,
ServerStatus, SimpleMetricsData, SystemInfo, TrafficShapingConfig, ValidationSettings,
ValidationUpdate,
};
use mockforge_core::workspace_import::{ImportResponse, ImportRoute};
use mockforge_plugin_loader::{
GitPluginConfig, GitPluginLoader, PluginLoader, PluginLoaderConfig, PluginSource,
RemotePluginConfig, RemotePluginLoader,
};
pub mod admin;
pub mod ai_studio;
pub mod analytics;
pub mod analytics_stream;
pub mod analytics_v2;
pub mod assets;
pub mod behavioral_cloning;
pub mod chains;
pub mod chaos_api;
pub mod community;
pub mod contract_diff;
pub mod coverage_metrics;
pub mod failure_analysis;
pub mod federation_api;
pub mod graph;
pub mod health;
pub mod migration;
pub mod pillar_analytics;
pub mod playground;
pub mod plugin;
pub mod promotions;
pub mod protocol_contracts;
pub mod recorder_api;
pub mod vbr_api;
pub mod verification;
pub mod voice;
pub mod workspaces;
pub mod world_state_proxy;
pub use assets::*;
pub use chains::*;
pub use graph::*;
pub use migration::*;
pub use plugin::*;
use mockforge_core::workspace_import::WorkspaceImportConfig;
use mockforge_core::workspace_persistence::WorkspacePersistence;
/// In-memory aggregate counters for requests observed by the admin server.
///
/// Used as a fallback metrics source when the global request logger is not
/// initialized (see `get_dashboard` / `get_metrics`).
#[derive(Debug, Clone, Default)]
pub struct RequestMetrics {
    /// Total number of requests recorded since startup.
    pub total_requests: u64,
    /// Currently open connections (shared counter across protocols).
    pub active_connections: u64,
    /// Request count keyed by "METHOD /path" endpoint string.
    pub requests_by_endpoint: HashMap<String, u64>,
    /// Recent response times in milliseconds (bounded ring, oldest dropped).
    pub response_times: Vec<u64>,
    /// Recent response times per endpoint, bounded per endpoint.
    pub response_times_by_endpoint: HashMap<String, Vec<u64>>,
    /// Count of responses with status >= 400, keyed by endpoint.
    pub errors_by_endpoint: HashMap<String, u64>,
    /// Timestamp of the most recent request seen per endpoint.
    pub last_request_by_endpoint: HashMap<String, chrono::DateTime<Utc>>,
}
/// Latest host-level resource snapshot gathered by the monitoring task.
#[derive(Debug, Clone)]
pub struct SystemMetrics {
    /// Resident memory usage in megabytes.
    pub memory_usage_mb: u64,
    /// Global CPU usage percentage (0.0..=100.0).
    pub cpu_usage_percent: f64,
    /// Logical CPU count, used as a proxy for active threads.
    pub active_threads: u32,
}
/// A single timestamped sample in a metrics time series.
#[derive(Debug, Clone)]
pub struct TimeSeriesPoint {
    /// When the sample was taken (UTC).
    pub timestamp: chrono::DateTime<Utc>,
    /// Sample value; unit depends on the owning series.
    pub value: f64,
}
/// Bounded time-series buffers backing the dashboard charts.
/// Each series is capped at 100 points (see `update_time_series_data`).
#[derive(Debug, Clone, Default)]
pub struct TimeSeriesData {
    /// Memory usage samples, in MB.
    pub memory_usage: Vec<TimeSeriesPoint>,
    /// CPU usage samples, in percent.
    pub cpu_usage: Vec<TimeSeriesPoint>,
    /// Cumulative request-count samples.
    pub request_count: Vec<TimeSeriesPoint>,
    /// Response-time samples, in milliseconds.
    pub response_time: Vec<TimeSeriesPoint>,
}
/// State of an admin-initiated server restart.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestartStatus {
    /// True while a restart is pending/underway.
    pub in_progress: bool,
    /// When the restart was requested, if any.
    pub initiated_at: Option<chrono::DateTime<Utc>>,
    /// Operator-supplied reason for the restart.
    pub reason: Option<String>,
    /// Outcome of the last completed restart; `None` until it finishes.
    pub success: Option<bool>,
}
/// Metadata describing a recorded fixture stored on disk.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureInfo {
    /// Unique fixture identifier.
    pub id: String,
    /// Protocol the fixture was captured from (e.g. HTTP, WS).
    pub protocol: String,
    /// Request method associated with the fixture.
    pub method: String,
    /// Request path associated with the fixture.
    pub path: String,
    /// When the fixture was persisted.
    pub saved_at: chrono::DateTime<Utc>,
    /// Size of the fixture file in bytes.
    pub file_size: u64,
    /// Filesystem path to the fixture file.
    pub file_path: String,
    /// Content fingerprint used for dedup/lookup.
    pub fingerprint: String,
    /// Free-form extra metadata.
    pub metadata: serde_json::Value,
}
/// Result of a single smoke-test invocation against a mocked endpoint.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SmokeTestResult {
    /// Stable identifier used to upsert results (see `update_smoke_test_result`).
    pub id: String,
    /// Human-readable test name.
    pub name: String,
    /// HTTP method exercised by the test.
    pub method: String,
    /// Request path exercised by the test.
    pub path: String,
    /// Description of what the test checks.
    pub description: String,
    /// When the test last ran, if ever.
    pub last_run: Option<chrono::DateTime<Utc>>,
    /// Current status string (e.g. passed/failed/pending).
    pub status: String,
    /// Observed response time in ms, when available.
    pub response_time_ms: Option<u64>,
    /// Error detail for failed runs.
    pub error_message: Option<String>,
    /// HTTP status code returned by the endpoint, when available.
    pub status_code: Option<u16>,
    /// Wall-clock duration of the run in seconds.
    pub duration_seconds: Option<f64>,
}
/// Runtime options for executing smoke tests.
#[derive(Debug, Clone)]
pub struct SmokeTestContext {
    /// Base URL the tests are issued against.
    pub base_url: String,
    /// Per-request timeout in seconds.
    pub timeout_seconds: u64,
    /// Whether tests may run concurrently.
    pub parallel: bool,
}
/// Mutable runtime configuration exposed through the admin API.
#[derive(Debug, Clone, Serialize)]
pub struct ConfigurationState {
    /// Artificial latency injection settings.
    pub latency_profile: LatencyProfile,
    /// Fault-injection (error response) settings.
    pub fault_config: FaultConfig,
    /// Upstream proxy settings.
    pub proxy_config: ProxyConfig,
    /// Request/response validation settings.
    pub validation_settings: ValidationSettings,
    /// Bandwidth/burst-loss traffic shaping settings.
    pub traffic_shaping: TrafficShapingConfig,
}
/// Record of a workspace import attempt kept for the import-history view.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ImportHistoryEntry {
    /// Unique identifier for this import.
    pub id: String,
    /// Source format of the imported collection (e.g. openapi, postman).
    pub format: String,
    /// When the import ran.
    pub timestamp: chrono::DateTime<Utc>,
    /// Number of routes produced by the import.
    pub routes_count: usize,
    /// Number of variables produced by the import.
    pub variables_count: usize,
    /// Number of warnings emitted during the import.
    pub warnings_count: usize,
    /// Whether the import succeeded overall.
    pub success: bool,
    /// Original filename, when uploaded from a file.
    pub filename: Option<String>,
    /// Target environment, if one was selected.
    pub environment: Option<String>,
    /// Base URL applied to imported routes, if any.
    pub base_url: Option<String>,
    /// Error detail for failed imports.
    pub error_message: Option<String>,
}
/// Shared state for all admin API handlers.
///
/// Cheap to clone: every mutable field is behind `Arc` (and usually `RwLock`),
/// so clones share the same underlying data.
#[derive(Clone)]
pub struct AdminState {
    /// Bound address of the HTTP mock server, if running.
    pub http_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the WebSocket server, if running.
    pub ws_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the gRPC server, if running.
    pub grpc_server_addr: Option<std::net::SocketAddr>,
    /// Bound address of the GraphQL server, if running.
    pub graphql_server_addr: Option<std::net::SocketAddr>,
    /// Whether the admin API itself is enabled.
    pub api_enabled: bool,
    /// Port the admin UI/API listens on.
    pub admin_port: u16,
    /// Server start time, used for uptime calculations.
    pub start_time: chrono::DateTime<Utc>,
    /// Aggregate request counters (fallback when no global logger exists).
    pub metrics: Arc<RwLock<RequestMetrics>>,
    /// Latest host resource snapshot.
    pub system_metrics: Arc<RwLock<SystemMetrics>>,
    /// Mutable runtime configuration.
    pub config: Arc<RwLock<ConfigurationState>>,
    /// Bounded in-memory request log (capped at 1000 entries).
    pub logs: Arc<RwLock<Vec<RequestLog>>>,
    /// Bounded time-series buffers for dashboard charts.
    pub time_series: Arc<RwLock<TimeSeriesData>>,
    /// Current restart state.
    pub restart_status: Arc<RwLock<RestartStatus>>,
    /// Recent smoke-test results (capped at 100 entries).
    pub smoke_test_results: Arc<RwLock<Vec<SmokeTestResult>>>,
    /// History of workspace imports.
    pub import_history: Arc<RwLock<Vec<ImportHistoryEntry>>>,
    /// Workspace persistence backend (defaults to ./workspaces).
    pub workspace_persistence: Arc<WorkspacePersistence>,
    /// Loaded plugin registry.
    pub plugin_registry: Arc<RwLock<PluginRegistry>>,
    /// Reality engine instance.
    pub reality_engine: Arc<RwLock<mockforge_core::RealityEngine>>,
    /// Reality continuum engine, optionally driven by a virtual clock.
    pub continuum_engine: Arc<RwLock<mockforge_core::RealityContinuumEngine>>,
    /// Chaos API state, when the chaos feature is active.
    pub chaos_api_state: Option<Arc<mockforge_chaos::api::ChaosApiState>>,
    /// Latency injector shared with the data plane, if configured.
    pub latency_injector: Option<Arc<RwLock<mockforge_core::latency::LatencyInjector>>>,
    /// Intelligent-behavior engine, if configured.
    pub mockai: Option<Arc<RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
    /// Traffic recorder, if configured.
    pub recorder: Option<Arc<mockforge_recorder::Recorder>>,
    /// Federation handle, if configured.
    pub federation: Option<Arc<mockforge_federation::Federation>>,
    /// Virtual backend reality engine, if configured.
    pub vbr_engine: Option<Arc<mockforge_vbr::VbrEngine>>,
}
impl AdminState {
    /// Spawn a detached background task that samples host CPU/memory every
    /// 10 seconds and folds the readings into `system_metrics` and the
    /// time-series buffers. The task runs until the process exits.
    pub async fn start_system_monitoring(&self) {
        let state_clone = self.clone();
        tokio::spawn(async move {
            let mut sys = System::new_all();
            let mut refresh_count = 0u64;
            tracing::info!("Starting system monitoring background task");
            loop {
                sys.refresh_all();
                let cpu_usage = sys.global_cpu_usage();
                let _total_memory = sys.total_memory() as f64;
                let used_memory = sys.used_memory() as f64;
                // Assumes sysinfo reports used memory in bytes — TODO confirm
                // against the pinned sysinfo version (older releases used KB).
                let memory_usage_mb = used_memory / 1024.0 / 1024.0;
                // Logical CPU count, used as a proxy for active threads.
                let active_threads = sys.cpus().len() as u32;
                let memory_mb_u64 = memory_usage_mb as u64;
                // Only log every 10th refresh (~100s) to keep debug output quiet.
                if refresh_count.is_multiple_of(10) {
                    tracing::debug!(
                        "System metrics updated: CPU={:.1}%, Mem={}MB, Threads={}",
                        cpu_usage,
                        memory_mb_u64,
                        active_threads
                    );
                }
                state_clone
                    .update_system_metrics(memory_mb_u64, cpu_usage as f64, active_threads)
                    .await;
                refresh_count += 1;
                tokio::time::sleep(Duration::from_secs(10)).await;
            }
        });
    }

    /// Construct the admin state with default configuration values.
    ///
    /// Optional engine handles (`chaos_api_state`, `latency_injector`, …) are
    /// passed through as-is; `continuum_config`/`virtual_clock` select how the
    /// continuum engine is built.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        http_server_addr: Option<std::net::SocketAddr>,
        ws_server_addr: Option<std::net::SocketAddr>,
        grpc_server_addr: Option<std::net::SocketAddr>,
        graphql_server_addr: Option<std::net::SocketAddr>,
        api_enabled: bool,
        admin_port: u16,
        chaos_api_state: Option<Arc<mockforge_chaos::api::ChaosApiState>>,
        latency_injector: Option<Arc<RwLock<mockforge_core::latency::LatencyInjector>>>,
        mockai: Option<Arc<RwLock<mockforge_core::intelligent_behavior::MockAI>>>,
        continuum_config: Option<mockforge_core::ContinuumConfig>,
        virtual_clock: Option<Arc<mockforge_core::VirtualClock>>,
        recorder: Option<Arc<mockforge_recorder::Recorder>>,
        federation: Option<Arc<mockforge_federation::Federation>>,
        vbr_engine: Option<Arc<mockforge_vbr::VbrEngine>>,
    ) -> Self {
        let start_time = Utc::now();
        Self {
            http_server_addr,
            ws_server_addr,
            grpc_server_addr,
            graphql_server_addr,
            api_enabled,
            admin_port,
            start_time,
            metrics: Arc::new(RwLock::new(RequestMetrics::default())),
            system_metrics: Arc::new(RwLock::new(SystemMetrics {
                memory_usage_mb: 0,
                cpu_usage_percent: 0.0,
                active_threads: 0,
            })),
            // Defaults below are the out-of-the-box admin configuration; all
            // of them can be changed at runtime via the update_* endpoints.
            config: Arc::new(RwLock::new(ConfigurationState {
                latency_profile: LatencyProfile {
                    name: "default".to_string(),
                    base_ms: 50,
                    jitter_ms: 20,
                    tag_overrides: HashMap::new(),
                },
                fault_config: FaultConfig {
                    enabled: false,
                    failure_rate: 0.0,
                    status_codes: vec![500, 502, 503],
                    active_failures: 0,
                },
                proxy_config: ProxyConfig {
                    enabled: false,
                    upstream_url: None,
                    timeout_seconds: 30,
                    requests_proxied: 0,
                },
                validation_settings: ValidationSettings {
                    mode: "enforce".to_string(),
                    aggregate_errors: true,
                    validate_responses: false,
                    overrides: HashMap::new(),
                },
                traffic_shaping: TrafficShapingConfig {
                    enabled: false,
                    bandwidth: crate::models::BandwidthConfig {
                        enabled: false,
                        max_bytes_per_sec: 1_048_576,
                        burst_capacity_bytes: 10_485_760,
                        tag_overrides: HashMap::new(),
                    },
                    burst_loss: crate::models::BurstLossConfig {
                        enabled: false,
                        burst_probability: 0.1,
                        burst_duration_ms: 5000,
                        loss_rate_during_burst: 0.5,
                        recovery_time_ms: 30000,
                        tag_overrides: HashMap::new(),
                    },
                },
            })),
            logs: Arc::new(RwLock::new(Vec::new())),
            time_series: Arc::new(RwLock::new(TimeSeriesData::default())),
            restart_status: Arc::new(RwLock::new(RestartStatus {
                in_progress: false,
                initiated_at: None,
                reason: None,
                success: None,
            })),
            smoke_test_results: Arc::new(RwLock::new(Vec::new())),
            import_history: Arc::new(RwLock::new(Vec::new())),
            workspace_persistence: Arc::new(WorkspacePersistence::new("./workspaces")),
            plugin_registry: Arc::new(RwLock::new(PluginRegistry::new())),
            reality_engine: Arc::new(RwLock::new(mockforge_core::RealityEngine::new())),
            continuum_engine: Arc::new(RwLock::new({
                let config = continuum_config.unwrap_or_default();
                if let Some(clock) = virtual_clock {
                    mockforge_core::RealityContinuumEngine::with_virtual_clock(config, clock)
                } else {
                    mockforge_core::RealityContinuumEngine::new(config)
                }
            })),
            chaos_api_state,
            latency_injector,
            mockai,
            recorder,
            federation,
            vbr_engine,
        }
    }

    /// Record one completed request into the counters, time series, and the
    /// bounded in-memory log.
    ///
    /// Responses with status >= 400 also increment the per-endpoint error
    /// counter; `error` becomes the log entry's `error_message`.
    pub async fn record_request(
        &self,
        method: &str,
        path: &str,
        status_code: u16,
        response_time_ms: u64,
        error: Option<String>,
    ) {
        let mut metrics = self.metrics.write().await;
        metrics.total_requests += 1;
        // Endpoints are keyed as "METHOD /path" throughout this module.
        let endpoint = format!("{} {}", method, path);
        *metrics.requests_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
        if status_code >= 400 {
            *metrics.errors_by_endpoint.entry(endpoint.clone()).or_insert(0) += 1;
        }
        // Keep only the 100 most recent global samples.
        metrics.response_times.push(response_time_ms);
        if metrics.response_times.len() > 100 {
            metrics.response_times.remove(0);
        }
        // Keep only the 50 most recent samples per endpoint.
        let endpoint_times =
            metrics.response_times_by_endpoint.entry(endpoint.clone()).or_default();
        endpoint_times.push(response_time_ms);
        if endpoint_times.len() > 50 {
            endpoint_times.remove(0);
        }
        metrics.last_request_by_endpoint.insert(endpoint, Utc::now());
        let total_requests = metrics.total_requests;
        // Release the metrics lock before touching the time-series lock to
        // avoid holding two locks at once.
        drop(metrics);
        self.update_time_series_on_request(response_time_ms, total_requests).await;
        let mut logs = self.logs.write().await;
        let log_entry = RequestLog {
            id: format!("req_{}", total_requests),
            timestamp: Utc::now(),
            method: method.to_string(),
            path: path.to_string(),
            status_code,
            response_time_ms,
            client_ip: None,
            user_agent: None,
            headers: HashMap::new(),
            response_size_bytes: 0,
            error_message: error,
        };
        logs.push(log_entry);
        // Cap the in-memory log at 1000 entries, dropping the oldest.
        if logs.len() > 1000 {
            logs.remove(0);
        }
    }

    /// Snapshot the aggregate request metrics.
    pub async fn get_metrics(&self) -> RequestMetrics {
        self.metrics.read().await.clone()
    }

    /// Overwrite the system metrics snapshot and append to the time series.
    pub async fn update_system_metrics(&self, memory_mb: u64, cpu_percent: f64, threads: u32) {
        let mut system_metrics = self.system_metrics.write().await;
        system_metrics.memory_usage_mb = memory_mb;
        system_metrics.cpu_usage_percent = cpu_percent;
        system_metrics.active_threads = threads;
        self.update_time_series_data(memory_mb as f64, cpu_percent).await;
    }

    /// Append one sample to every time series (memory, CPU, request count,
    /// mean response time), trimming each series to 100 points.
    async fn update_time_series_data(&self, memory_mb: f64, cpu_percent: f64) {
        let now = Utc::now();
        let mut time_series = self.time_series.write().await;
        time_series.memory_usage.push(TimeSeriesPoint {
            timestamp: now,
            value: memory_mb,
        });
        time_series.cpu_usage.push(TimeSeriesPoint {
            timestamp: now,
            value: cpu_percent,
        });
        let metrics = self.metrics.read().await;
        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: metrics.total_requests as f64,
        });
        // Mean of the recent (bounded) response-time window; 0 when empty.
        let avg_response_time = if !metrics.response_times.is_empty() {
            metrics.response_times.iter().sum::<u64>() as f64 / metrics.response_times.len() as f64
        } else {
            0.0
        };
        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: avg_response_time,
        });
        const MAX_POINTS: usize = 100;
        if time_series.memory_usage.len() > MAX_POINTS {
            time_series.memory_usage.remove(0);
        }
        if time_series.cpu_usage.len() > MAX_POINTS {
            time_series.cpu_usage.remove(0);
        }
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }

    /// Snapshot the host resource metrics.
    pub async fn get_system_metrics(&self) -> SystemMetrics {
        self.system_metrics.read().await.clone()
    }

    /// Snapshot the dashboard time-series buffers.
    pub async fn get_time_series_data(&self) -> TimeSeriesData {
        self.time_series.read().await.clone()
    }

    /// Snapshot the current restart status.
    pub async fn get_restart_status(&self) -> RestartStatus {
        self.restart_status.read().await.clone()
    }

    /// Mark a restart as started.
    ///
    /// # Errors
    /// Returns an error if a restart is already in progress.
    pub async fn initiate_restart(&self, reason: String) -> Result<()> {
        let mut status = self.restart_status.write().await;
        if status.in_progress {
            return Err(Error::generic("Restart already in progress".to_string()));
        }
        status.in_progress = true;
        status.initiated_at = Some(Utc::now());
        status.reason = Some(reason);
        status.success = None;
        Ok(())
    }

    /// Mark the in-flight restart as finished with the given outcome.
    pub async fn complete_restart(&self, success: bool) {
        let mut status = self.restart_status.write().await;
        status.in_progress = false;
        status.success = Some(success);
    }

    /// Snapshot all stored smoke-test results.
    pub async fn get_smoke_test_results(&self) -> Vec<SmokeTestResult> {
        self.smoke_test_results.read().await.clone()
    }

    /// Upsert a smoke-test result by `id`, capping the list at 100 entries.
    pub async fn update_smoke_test_result(&self, result: SmokeTestResult) {
        let mut results = self.smoke_test_results.write().await;
        if let Some(existing) = results.iter_mut().find(|r| r.id == result.id) {
            *existing = result;
        } else {
            results.push(result);
        }
        if results.len() > 100 {
            results.remove(0);
        }
    }

    /// Drop all stored smoke-test results.
    pub async fn clear_smoke_test_results(&self) {
        let mut results = self.smoke_test_results.write().await;
        results.clear();
    }

    /// Append request-count and response-time samples after a request is
    /// recorded, trimming each series to 100 points.
    async fn update_time_series_on_request(&self, response_time_ms: u64, total_requests: u64) {
        let now = Utc::now();
        let mut time_series = self.time_series.write().await;
        time_series.request_count.push(TimeSeriesPoint {
            timestamp: now,
            value: total_requests as f64,
        });
        time_series.response_time.push(TimeSeriesPoint {
            timestamp: now,
            value: response_time_ms as f64,
        });
        const MAX_POINTS: usize = 100;
        if time_series.request_count.len() > MAX_POINTS {
            time_series.request_count.remove(0);
        }
        if time_series.response_time.len() > MAX_POINTS {
            time_series.response_time.remove(0);
        }
    }

    /// Snapshot the runtime configuration.
    pub async fn get_config(&self) -> ConfigurationState {
        self.config.read().await.clone()
    }

    /// Replace the latency profile values (name is left unchanged).
    pub async fn update_latency_config(
        &self,
        base_ms: u64,
        jitter_ms: u64,
        tag_overrides: HashMap<String, u64>,
    ) {
        let mut config = self.config.write().await;
        config.latency_profile.base_ms = base_ms;
        config.latency_profile.jitter_ms = jitter_ms;
        config.latency_profile.tag_overrides = tag_overrides;
    }

    /// Replace the fault-injection settings (`active_failures` is untouched).
    pub async fn update_fault_config(
        &self,
        enabled: bool,
        failure_rate: f64,
        status_codes: Vec<u16>,
    ) {
        let mut config = self.config.write().await;
        config.fault_config.enabled = enabled;
        config.fault_config.failure_rate = failure_rate;
        config.fault_config.status_codes = status_codes;
    }

    /// Replace the proxy settings (`requests_proxied` is untouched).
    pub async fn update_proxy_config(
        &self,
        enabled: bool,
        upstream_url: Option<String>,
        timeout_seconds: u64,
    ) {
        let mut config = self.config.write().await;
        config.proxy_config.enabled = enabled;
        config.proxy_config.upstream_url = upstream_url;
        config.proxy_config.timeout_seconds = timeout_seconds;
    }

    /// Replace the validation settings wholesale.
    pub async fn update_validation_config(
        &self,
        mode: String,
        aggregate_errors: bool,
        validate_responses: bool,
        overrides: HashMap<String, String>,
    ) {
        let mut config = self.config.write().await;
        config.validation_settings.mode = mode;
        config.validation_settings.aggregate_errors = aggregate_errors;
        config.validation_settings.validate_responses = validate_responses;
        config.validation_settings.overrides = overrides;
    }

    /// Return the newest-first subset of in-memory logs matching `filter`,
    /// capped at `filter.limit` (default 100).
    pub async fn get_logs_filtered(&self, filter: &LogFilter) -> Vec<RequestLog> {
        let logs = self.logs.read().await;
        logs.iter()
            .rev() // newest first
            .filter(|log| {
                if let Some(ref method) = filter.method {
                    if log.method != *method {
                        return false;
                    }
                }
                if let Some(ref path_pattern) = filter.path_pattern {
                    // Substring match, not a glob/regex.
                    if !log.path.contains(path_pattern) {
                        return false;
                    }
                }
                if let Some(status) = filter.status_code {
                    if log.status_code != status {
                        return false;
                    }
                }
                true
            })
            .take(filter.limit.unwrap_or(100))
            .cloned()
            .collect()
    }

    /// Drop all in-memory log entries.
    pub async fn clear_logs(&self) {
        let mut logs = self.logs.write().await;
        logs.clear();
    }
}
/// Serve the embedded admin UI HTML page.
pub async fn serve_admin_html() -> Html<&'static str> {
    let markup = crate::get_admin_html();
    Html(markup)
}
/// Serve the embedded admin UI stylesheet with the correct content type.
pub async fn serve_admin_css() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    let headers = [(http::header::CONTENT_TYPE, "text/css")];
    let body = crate::get_admin_css();
    (headers, body)
}
/// Serve the embedded admin UI JavaScript bundle with the correct content type.
pub async fn serve_admin_js() -> ([(http::HeaderName, &'static str); 1], &'static str) {
    let headers = [(http::header::CONTENT_TYPE, "application/javascript")];
    let body = crate::get_admin_js();
    (headers, body)
}
/// GET /dashboard — assemble the full dashboard payload: server info, system
/// info, aggregate metrics, per-server status, per-route stats, and recent logs.
///
/// When the global request logger is available, metrics are recomputed from
/// its log history; otherwise the locally recorded `AdminState` metrics are
/// used as a fallback.
pub async fn get_dashboard(State(state): State<AdminState>) -> Json<ApiResponse<DashboardData>> {
    let uptime = Utc::now().signed_duration_since(state.start_time).num_seconds() as u64;
    let system_metrics = state.get_system_metrics().await;
    let _config = state.get_config().await;
    let (recent_logs, calculated_metrics): (Vec<RequestLog>, RequestMetrics) =
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            // Recompute aggregates from the full centralized log history.
            let all_logs = global_logger.get_recent_logs(None).await;
            let recent_logs_subset = global_logger.get_recent_logs(Some(20)).await;
            let total_requests = all_logs.len() as u64;
            let mut requests_by_endpoint = HashMap::new();
            let mut errors_by_endpoint = HashMap::new();
            let mut response_times = Vec::new();
            let mut last_request_by_endpoint = HashMap::new();
            for log in &all_logs {
                // Endpoints are keyed as "METHOD /path".
                let endpoint_key = format!("{} {}", log.method, log.path);
                *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
                if log.status_code >= 400 {
                    *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
                }
                response_times.push(log.response_time_ms);
                last_request_by_endpoint.insert(endpoint_key, log.timestamp);
            }
            let calculated_metrics = RequestMetrics {
                total_requests,
                // Active-connection tracking is not available from log history.
                active_connections: 0,
                requests_by_endpoint,
                response_times,
                // Per-endpoint timing is not reconstructed here (unused below).
                response_times_by_endpoint: HashMap::new(),
                errors_by_endpoint,
                last_request_by_endpoint,
            };
            // Convert the logger's entry type into the admin RequestLog model.
            let recent_logs = recent_logs_subset
                .into_iter()
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();
            (recent_logs, calculated_metrics)
        } else {
            // Fallback: locally recorded state. NOTE(review): this branch
            // returns 10 recent logs vs. 20 in the logger branch — confirm
            // whether the mismatch is intentional.
            let logs = state.logs.read().await;
            let recent_logs = logs.iter().rev().take(10).cloned().collect();
            let metrics = state.get_metrics().await;
            (recent_logs, metrics)
        };
    let metrics = calculated_metrics;
    let system_info = SystemInfo {
        version: env!("CARGO_PKG_VERSION").to_string(),
        uptime_seconds: uptime,
        memory_usage_mb: system_metrics.memory_usage_mb,
        cpu_usage_percent: system_metrics.cpu_usage_percent,
        active_threads: system_metrics.active_threads as usize,
        // Route count approximated by the number of distinct endpoints seen.
        total_routes: metrics.requests_by_endpoint.len(),
        total_fixtures: count_fixtures().unwrap_or(0),
    };
    let servers = vec![
        ServerStatus {
            server_type: "HTTP".to_string(),
            address: state.http_server_addr.map(|addr| addr.to_string()),
            running: state.http_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections,
            total_requests: count_requests_by_server_type(&metrics, "HTTP"),
        },
        ServerStatus {
            server_type: "WebSocket".to_string(),
            address: state.ws_server_addr.map(|addr| addr.to_string()),
            running: state.ws_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            // Heuristic split of the shared connection counter across
            // protocols — TODO confirm this is the intended presentation.
            active_connections: metrics.active_connections / 2,
            total_requests: count_requests_by_server_type(&metrics, "WebSocket"),
        },
        ServerStatus {
            server_type: "gRPC".to_string(),
            address: state.grpc_server_addr.map(|addr| addr.to_string()),
            running: state.grpc_server_addr.is_some(),
            start_time: Some(state.start_time),
            uptime_seconds: Some(uptime),
            active_connections: metrics.active_connections / 3,
            total_requests: count_requests_by_server_type(&metrics, "gRPC"),
        },
    ];
    // Derive per-route statistics from the "METHOD /path" endpoint keys.
    let mut routes = Vec::new();
    for (endpoint, count) in &metrics.requests_by_endpoint {
        let parts: Vec<&str> = endpoint.splitn(2, ' ').collect();
        if parts.len() == 2 {
            let method = parts[0].to_string();
            let path = parts[1].to_string();
            let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);
            routes.push(RouteInfo {
                method: Some(method.clone()),
                path: path.clone(),
                priority: 0,
                has_fixtures: route_has_fixtures(&method, &path),
                latency_ms: calculate_endpoint_latency(&metrics, endpoint),
                request_count: *count,
                last_request: get_endpoint_last_request(&metrics, endpoint),
                error_count,
            });
        }
    }
    let dashboard = DashboardData {
        server_info: ServerInfo {
            version: env!("CARGO_PKG_VERSION").to_string(),
            // Build metadata injected by vergen at compile time, if present.
            build_time: option_env!("VERGEN_BUILD_TIMESTAMP").unwrap_or("unknown").to_string(),
            git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("unknown").to_string(),
            http_server: state.http_server_addr.map(|addr| addr.to_string()),
            ws_server: state.ws_server_addr.map(|addr| addr.to_string()),
            grpc_server: state.grpc_server_addr.map(|addr| addr.to_string()),
            graphql_server: state.graphql_server_addr.map(|addr| addr.to_string()),
            api_enabled: state.api_enabled,
            admin_port: state.admin_port,
        },
        system_info: DashboardSystemInfo {
            os: std::env::consts::OS.to_string(),
            arch: std::env::consts::ARCH.to_string(),
            uptime,
            // Dashboard expects bytes; convert back from MB.
            memory_usage: system_metrics.memory_usage_mb * 1024 * 1024,
        },
        metrics: SimpleMetricsData {
            total_requests: metrics.requests_by_endpoint.values().sum(),
            active_requests: metrics.active_connections,
            average_response_time: if metrics.response_times.is_empty() {
                0.0
            } else {
                metrics.response_times.iter().sum::<u64>() as f64
                    / metrics.response_times.len() as f64
            },
            // Error rate as a 0..1 fraction of all requests.
            error_rate: {
                let total_requests = metrics.requests_by_endpoint.values().sum::<u64>();
                let total_errors = metrics.errors_by_endpoint.values().sum::<u64>();
                if total_requests == 0 {
                    0.0
                } else {
                    total_errors as f64 / total_requests as f64
                }
            },
        },
        servers,
        recent_logs,
        system: system_info,
    };
    Json(ApiResponse::success(dashboard))
}
/// GET /routes — list the globally registered routes as JSON.
pub async fn get_routes() -> impl IntoResponse {
    let routes = mockforge_core::request_logger::get_global_routes();
    let total = routes.len();
    let json = serde_json::json!({
        "routes": routes,
        "total": total
    });
    // Fall back to an empty list if serialization somehow fails.
    let body = serde_json::to_string(&json).unwrap_or_else(|_| r#"{"routes":[]}"#.to_string());
    (StatusCode::OK, [("content-type", "application/json")], body)
}
/// GET /server-info — report the bound addresses of each sub-server plus the
/// admin port.
pub async fn get_server_info(State(state): State<AdminState>) -> Json<serde_json::Value> {
    let http_server = state.http_server_addr.map(|addr| addr.to_string());
    let ws_server = state.ws_server_addr.map(|addr| addr.to_string());
    let grpc_server = state.grpc_server_addr.map(|addr| addr.to_string());
    Json(json!({
        "http_server": http_server,
        "ws_server": ws_server,
        "grpc_server": grpc_server,
        "admin_port": state.admin_port
    }))
}
/// GET /health — static health check reporting every sub-service as healthy.
pub async fn get_health() -> Json<HealthCheck> {
    let mut check = HealthCheck::healthy();
    for service in ["http", "websocket", "grpc"] {
        check = check.with_service(service.to_string(), "healthy".to_string());
    }
    Json(check)
}
/// GET /logs — return request logs filtered by optional `method`, `path`
/// (substring), `status`, and `limit` query parameters.
///
/// Prefers the centralized global logger; falls back to the in-memory
/// `AdminState` log buffer when the logger is not initialized.
pub async fn get_logs(
    State(state): State<AdminState>,
    Query(params): Query<HashMap<String, String>>,
) -> Json<ApiResponse<Vec<RequestLog>>> {
    // Build a LogFilter from the free-form query string.
    let mut filter = LogFilter::default();
    if let Some(method) = params.get("method") {
        filter.method = Some(method.clone());
    }
    if let Some(path) = params.get("path") {
        filter.path_pattern = Some(path.clone());
    }
    if let Some(status) = params.get("status").and_then(|s| s.parse().ok()) {
        filter.status_code = Some(status);
    }
    if let Some(limit) = params.get("limit").and_then(|s| s.parse().ok()) {
        filter.limit = Some(limit);
    }
    let logs = if let Some(global_logger) = mockforge_core::get_global_logger() {
        // NOTE(review): the limit is applied before filtering here, so fewer
        // than `limit` matching entries may be returned — confirm intent.
        let centralized_logs = global_logger.get_recent_logs(filter.limit).await;
        centralized_logs
            .into_iter()
            .filter(|log| {
                if let Some(ref method) = filter.method {
                    if log.method != *method {
                        return false;
                    }
                }
                if let Some(ref path_pattern) = filter.path_pattern {
                    // Substring match, not a glob/regex.
                    if !log.path.contains(path_pattern) {
                        return false;
                    }
                }
                if let Some(status) = filter.status_code {
                    if log.status_code != status {
                        return false;
                    }
                }
                true
            })
            // Convert the logger's entry type into the admin RequestLog model.
            .map(|log| RequestLog {
                id: log.id,
                timestamp: log.timestamp,
                method: log.method,
                path: log.path,
                status_code: log.status_code,
                response_time_ms: log.response_time_ms,
                client_ip: log.client_ip,
                user_agent: log.user_agent,
                headers: log.headers,
                response_size_bytes: log.response_size_bytes,
                error_message: log.error_message,
            })
            .collect()
    } else {
        state.get_logs_filtered(&filter).await
    };
    Json(ApiResponse::success(logs))
}
pub async fn get_reality_trace(
Path(request_id): Path<String>,
) -> Json<ApiResponse<Option<mockforge_core::request_logger::RealityTraceMetadata>>> {
if let Some(global_logger) = mockforge_core::get_global_logger() {
let logs = global_logger.get_recent_logs(None).await;
if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
Json(ApiResponse::success(log_entry.reality_metadata))
} else {
Json(ApiResponse::error(format!("Request {} not found", request_id)))
}
} else {
Json(ApiResponse::error("Request logger not initialized".to_string()))
}
}
pub async fn get_response_trace(
Path(request_id): Path<String>,
) -> Json<ApiResponse<Option<serde_json::Value>>> {
if let Some(global_logger) = mockforge_core::get_global_logger() {
let logs = global_logger.get_recent_logs(None).await;
if let Some(log_entry) = logs.into_iter().find(|log| log.id == request_id) {
let trace = log_entry
.metadata
.get("response_generation_trace")
.and_then(|s| serde_json::from_str::<serde_json::Value>(s).ok());
Json(ApiResponse::success(trace))
} else {
Json(ApiResponse::error(format!("Request {} not found", request_id)))
}
} else {
Json(ApiResponse::error("Request logger not initialized".to_string()))
}
}
/// Maximum number of recent log entries fetched per SSE poll.
const RECENT_LOGS_LIMIT: usize = 20;
/// SSE only streams logs newer than this many minutes.
const RECENT_LOGS_TTL_MINUTES: i64 = 5;
/// GET /logs/sse — stream newly observed request logs to the client as
/// Server-Sent Events.
///
/// Implementation: polls the global logger every 500ms via `stream::unfold`,
/// using a `HashSet` of already-sent log ids as the unfold state. Only logs
/// newer than `RECENT_LOGS_TTL_MINUTES` and not yet sent are emitted (event
/// name "new_logs"); otherwise a "keep_alive" event is produced.
pub async fn logs_sse(
    State(_state): State<AdminState>,
) -> Sse<impl Stream<Item = std::result::Result<Event, Infallible>>> {
    tracing::info!("SSE endpoint /logs/sse accessed - starting real-time log streaming for recent requests only");
    let stream = stream::unfold(std::collections::HashSet::new(), |mut seen_ids| async move {
        // Poll interval between log checks.
        tokio::time::sleep(Duration::from_millis(500)).await;
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            let centralized_logs = global_logger.get_recent_logs(Some(RECENT_LOGS_LIMIT)).await;
            tracing::debug!(
                "SSE: Checking logs - total logs: {}, seen logs: {}",
                centralized_logs.len(),
                seen_ids.len()
            );
            let now = Utc::now();
            // Discard anything older than the TTL so a fresh client is not
            // flooded with stale history.
            let ttl_cutoff = now - chrono::Duration::minutes(RECENT_LOGS_TTL_MINUTES);
            let new_logs: Vec<RequestLog> = centralized_logs
                .into_iter()
                .filter(|log| {
                    log.timestamp > ttl_cutoff && !seen_ids.contains(&log.id)
                })
                // Convert logger entries into the admin RequestLog model.
                .map(|log| RequestLog {
                    id: log.id,
                    timestamp: log.timestamp,
                    method: log.method,
                    path: log.path,
                    status_code: log.status_code,
                    response_time_ms: log.response_time_ms,
                    client_ip: log.client_ip,
                    user_agent: log.user_agent,
                    headers: log.headers,
                    response_size_bytes: log.response_size_bytes,
                    error_message: log.error_message,
                })
                .collect();
            // Remember what we have sent. NOTE(review): seen_ids grows
            // unboundedly for long-lived connections — consider pruning.
            for log in &new_logs {
                seen_ids.insert(log.id.clone());
            }
            if !new_logs.is_empty() {
                tracing::info!("SSE: Sending {} new logs to client", new_logs.len());
                let event_data = serde_json::to_string(&new_logs).unwrap_or_default();
                let event = Ok(Event::default().event("new_logs").data(event_data));
                return Some((event, seen_ids));
            }
        }
        // Nothing new (or no logger): emit an empty keep-alive event.
        let event = Ok(Event::default().event("keep_alive").data(""));
        Some((event, seen_ids))
    });
    Sse::new(stream).keep_alive(
        axum::response::sse::KeepAlive::new()
            .interval(Duration::from_secs(15))
            .text("keep-alive-text"),
    )
}
/// GET /metrics — build the detailed metrics payload: global and per-endpoint
/// response-time percentiles, per-endpoint error rates, and memory/CPU/latency
/// time series.
///
/// Like `get_dashboard`, aggregates are recomputed from the global logger when
/// it is available, falling back to the local `AdminState` counters otherwise.
pub async fn get_metrics(State(state): State<AdminState>) -> Json<ApiResponse<MetricsData>> {
    let metrics = if let Some(global_logger) = mockforge_core::get_global_logger() {
        let all_logs = global_logger.get_recent_logs(None).await;
        let total_requests = all_logs.len() as u64;
        let mut requests_by_endpoint = HashMap::new();
        let mut errors_by_endpoint = HashMap::new();
        let mut response_times = Vec::new();
        let mut last_request_by_endpoint = HashMap::new();
        for log in &all_logs {
            // Endpoints are keyed as "METHOD /path".
            let endpoint_key = format!("{} {}", log.method, log.path);
            *requests_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
            if log.status_code >= 400 {
                *errors_by_endpoint.entry(endpoint_key.clone()).or_insert(0) += 1;
            }
            response_times.push(log.response_time_ms);
            last_request_by_endpoint.insert(endpoint_key, log.timestamp);
        }
        RequestMetrics {
            total_requests,
            active_connections: 0,
            requests_by_endpoint,
            response_times,
            // Filled from a second logger pass below.
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint,
            last_request_by_endpoint,
        }
    } else {
        state.get_metrics().await
    };
    let system_metrics = state.get_system_metrics().await;
    let time_series = state.get_time_series_data().await;
    // Nearest-rank percentile on a pre-sorted slice: index = ceil(n * p),
    // clamped to the last element; 0 for an empty slice.
    fn calculate_percentile(sorted_data: &[u64], percentile: f64) -> u64 {
        if sorted_data.is_empty() {
            return 0;
        }
        let idx = ((sorted_data.len() as f64) * percentile).ceil() as usize;
        let idx = idx.min(sorted_data.len().saturating_sub(1));
        sorted_data[idx]
    }
    let mut response_times = metrics.response_times.clone();
    response_times.sort();
    let p50 = calculate_percentile(&response_times, 0.50);
    let p75 = calculate_percentile(&response_times, 0.75);
    let p90 = calculate_percentile(&response_times, 0.90);
    let p95 = calculate_percentile(&response_times, 0.95);
    let p99 = calculate_percentile(&response_times, 0.99);
    let p999 = calculate_percentile(&response_times, 0.999);
    // Second pass over the logger history to group response times per
    // endpoint. NOTE(review): this re-fetches all logs a second time.
    let mut response_times_by_endpoint: HashMap<String, Vec<u64>> = HashMap::new();
    if let Some(global_logger) = mockforge_core::get_global_logger() {
        let all_logs = global_logger.get_recent_logs(None).await;
        for log in &all_logs {
            let endpoint_key = format!("{} {}", log.method, log.path);
            response_times_by_endpoint
                .entry(endpoint_key)
                .or_default()
                .push(log.response_time_ms);
        }
    }
    // Per-endpoint percentile tables, keyed "p50".."p999".
    let mut endpoint_percentiles: HashMap<String, HashMap<String, u64>> = HashMap::new();
    for (endpoint, times) in &mut response_times_by_endpoint {
        times.sort();
        if !times.is_empty() {
            endpoint_percentiles.insert(
                endpoint.clone(),
                HashMap::from([
                    ("p50".to_string(), calculate_percentile(times, 0.50)),
                    ("p75".to_string(), calculate_percentile(times, 0.75)),
                    ("p90".to_string(), calculate_percentile(times, 0.90)),
                    ("p95".to_string(), calculate_percentile(times, 0.95)),
                    ("p99".to_string(), calculate_percentile(times, 0.99)),
                    ("p999".to_string(), calculate_percentile(times, 0.999)),
                ]),
            );
        }
    }
    // Error rate per endpoint as a 0..1 fraction.
    let mut error_rate_by_endpoint = HashMap::new();
    for (endpoint, total_count) in &metrics.requests_by_endpoint {
        let error_count = *metrics.errors_by_endpoint.get(endpoint).unwrap_or(&0);
        let error_rate = if *total_count > 0 {
            error_count as f64 / *total_count as f64
        } else {
            0.0
        };
        error_rate_by_endpoint.insert(endpoint.clone(), error_rate);
    }
    // When a series is still empty, emit a single "now" point so charts have
    // something to draw.
    let memory_usage_over_time = if time_series.memory_usage.is_empty() {
        vec![(Utc::now(), system_metrics.memory_usage_mb)]
    } else {
        time_series
            .memory_usage
            .iter()
            .map(|point| (point.timestamp, point.value as u64))
            .collect()
    };
    let cpu_usage_over_time = if time_series.cpu_usage.is_empty() {
        vec![(Utc::now(), system_metrics.cpu_usage_percent)]
    } else {
        time_series
            .cpu_usage
            .iter()
            .map(|point| (point.timestamp, point.value))
            .collect()
    };
    // Latency series sourced from the last 100 logger entries (empty if no
    // logger is installed).
    let latency_over_time: Vec<(chrono::DateTime<Utc>, u64)> =
        if let Some(global_logger) = mockforge_core::get_global_logger() {
            let all_logs = global_logger.get_recent_logs(Some(100)).await;
            all_logs.iter().map(|log| (log.timestamp, log.response_time_ms)).collect()
        } else {
            Vec::new()
        };
    let metrics_data = MetricsData {
        requests_by_endpoint: metrics.requests_by_endpoint,
        response_time_percentiles: HashMap::from([
            ("p50".to_string(), p50),
            ("p75".to_string(), p75),
            ("p90".to_string(), p90),
            ("p95".to_string(), p95),
            ("p99".to_string(), p99),
            ("p999".to_string(), p999),
        ]),
        endpoint_percentiles: Some(endpoint_percentiles),
        latency_over_time: Some(latency_over_time),
        error_rate_by_endpoint,
        memory_usage_over_time,
        cpu_usage_over_time,
    };
    Json(ApiResponse::success(metrics_data))
}
/// POST /config/latency — apply a latency-profile update and write an audit
/// log entry (with user/IP/user-agent attribution when available).
///
/// The payload must have `config_type == "latency"`; `base_ms` defaults to 50
/// and `jitter_ms` to 20 when absent or non-numeric.
pub async fn update_latency(
    State(state): State<AdminState>,
    headers: http::HeaderMap,
    Json(update): Json<ConfigUpdate>,
) -> Json<ApiResponse<String>> {
    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
    use crate::rbac::{extract_user_context, get_default_user_context};
    if update.config_type != "latency" {
        return Json(ApiResponse::error("Invalid config type".to_string()));
    }
    let base_ms = update.data.get("base_ms").and_then(|v| v.as_u64()).unwrap_or(50);
    let jitter_ms = update.data.get("jitter_ms").and_then(|v| v.as_u64()).unwrap_or(20);
    // Only numeric override values are kept; non-u64 entries are dropped.
    let tag_overrides: HashMap<String, u64> = update
        .data
        .get("tag_overrides")
        .and_then(|v| v.as_object())
        .map(|obj| obj.iter().filter_map(|(k, v)| v.as_u64().map(|val| (k.clone(), val))).collect())
        .unwrap_or_default();
    state.update_latency_config(base_ms, jitter_ms, tag_overrides.clone()).await;
    // Best-effort audit trail: skipped entirely when no global store exists.
    if let Some(audit_store) = get_global_audit_store() {
        let metadata = serde_json::json!({
            "base_ms": base_ms,
            "jitter_ms": jitter_ms,
            "tag_overrides": tag_overrides,
        });
        let mut audit_log = create_audit_log(
            AdminActionType::ConfigLatencyUpdated,
            format!("Latency profile updated: base_ms={}, jitter_ms={}", base_ms, jitter_ms),
            None,
            true,
            None,
            Some(metadata),
        );
        // Attribute the action to the requesting user when headers allow.
        if let Some(user_ctx) = extract_user_context(&headers).or_else(get_default_user_context) {
            audit_log.user_id = Some(user_ctx.user_id);
            audit_log.username = Some(user_ctx.username);
        }
        // Prefer the proxy-forwarded client IP, then the real-IP header.
        if let Some(ip) = headers
            .get("x-forwarded-for")
            .or_else(|| headers.get("x-real-ip"))
            .and_then(|h| h.to_str().ok())
        {
            audit_log.ip_address = Some(ip.to_string());
        }
        if let Some(ua) = headers.get("user-agent").and_then(|h| h.to_str().ok()) {
            audit_log.user_agent = Some(ua.to_string());
        }
        audit_store.record(audit_log).await;
    }
    tracing::info!("Updated latency profile: base_ms={}, jitter_ms={}", base_ms, jitter_ms);
    Json(ApiResponse::success("Latency profile updated".to_string()))
}
/// POST handler: apply a fault-injection configuration update.
///
/// Expects `config_type == "faults"`; reads `enabled`, `failure_rate` and
/// `status_codes` from the payload, falling back to defaults
/// (disabled, 0.0, [500, 502, 503]) for absent or malformed fields.
pub async fn update_faults(
    State(state): State<AdminState>,
    Json(update): Json<ConfigUpdate>,
) -> Json<ApiResponse<String>> {
    if update.config_type != "faults" {
        return Json(ApiResponse::error("Invalid config type".to_string()));
    }
    let data = &update.data;
    let enabled = data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
    let failure_rate = data.get("failure_rate").and_then(|v| v.as_f64()).unwrap_or(0.0);
    // Non-numeric array entries are dropped; a missing array gets the 5xx set.
    let status_codes: Vec<u16> = match data.get("status_codes").and_then(|v| v.as_array()) {
        Some(arr) => arr.iter().filter_map(|v| v.as_u64().map(|n| n as u16)).collect(),
        None => vec![500, 502, 503],
    };
    state.update_fault_config(enabled, failure_rate, status_codes).await;
    tracing::info!(
        "Updated fault configuration: enabled={}, failure_rate={}",
        enabled,
        failure_rate
    );
    Json(ApiResponse::success("Fault configuration updated".to_string()))
}
/// POST handler: apply a proxy configuration update.
///
/// Expects `config_type == "proxy"`; missing fields fall back to
/// disabled / no upstream / 30-second timeout.
pub async fn update_proxy(
    State(state): State<AdminState>,
    Json(update): Json<ConfigUpdate>,
) -> Json<ApiResponse<String>> {
    if update.config_type != "proxy" {
        return Json(ApiResponse::error("Invalid config type".to_string()));
    }
    let data = &update.data;
    let enabled = data.get("enabled").and_then(|v| v.as_bool()).unwrap_or(false);
    let upstream_url = data
        .get("upstream_url")
        .and_then(|v| v.as_str())
        .map(str::to_string);
    let timeout_seconds = data.get("timeout_seconds").and_then(|v| v.as_u64()).unwrap_or(30);
    state.update_proxy_config(enabled, upstream_url.clone(), timeout_seconds).await;
    tracing::info!(
        "Updated proxy configuration: enabled={}, upstream_url={:?}",
        enabled,
        upstream_url
    );
    Json(ApiResponse::success("Proxy configuration updated".to_string()))
}
/// POST handler: drop every stored request-log entry from admin state.
pub async fn clear_logs(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    state.clear_logs().await;
    tracing::info!("Cleared all request logs");
    Json(ApiResponse::success("Logs cleared".to_string()))
}
/// POST handler: kick off a full server restart.
///
/// Rejects the request if a restart is already in flight, records an audit
/// entry (when the audit store is initialized), then performs the actual
/// restart on a background task so this handler can reply immediately.
pub async fn restart_servers(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    use crate::audit::{create_audit_log, get_global_audit_store, AdminActionType};
    if state.get_restart_status().await.in_progress {
        return Json(ApiResponse::error("Server restart already in progress".to_string()));
    }
    let restart_result = state
        .initiate_restart("Manual restart requested via admin UI".to_string())
        .await;
    // Audit the attempt whether or not initiation succeeded.
    if let Some(audit_store) = get_global_audit_store() {
        let entry = create_audit_log(
            AdminActionType::ServerRestarted,
            "Server restart initiated via admin UI".to_string(),
            None,
            restart_result.is_ok(),
            restart_result.as_ref().err().map(|e| format!("{}", e)),
            None,
        );
        audit_store.record(entry).await;
    }
    if let Err(e) = restart_result {
        return Json(ApiResponse::error(format!("Failed to initiate restart: {}", e)));
    }
    // Run the restart out-of-band; completion status is recorded on state.
    let background_state = state.clone();
    tokio::spawn(async move {
        match perform_server_restart(&background_state).await {
            Ok(()) => {
                tracing::info!("Server restart completed successfully");
                background_state.complete_restart(true).await;
            }
            Err(e) => {
                tracing::error!("Server restart failed: {}", e);
                background_state.complete_restart(false).await;
            }
        }
    });
    tracing::info!("Server restart initiated via admin UI");
    Json(ApiResponse::success(
        "Server restart initiated. Please wait for completion.".to_string(),
    ))
}
/// Attempt a server restart, trying strategies from least to most invasive:
/// 1. signal the parent process (e.g. a supervisor) to restart us,
/// 2. spawn a replacement process and exit,
/// 3. run a restart script found on disk.
///
/// Returns the error of the final strategy if all of them fail.
async fn perform_server_restart(_state: &AdminState) -> Result<()> {
    let current_pid = std::process::id();
    tracing::info!("Initiating restart for process PID: {}", current_pid);
    let parent_pid = get_parent_process_id(current_pid).await?;
    tracing::info!("Found parent process PID: {}", parent_pid);
    // Idiom: `.is_ok()` instead of `if let Ok(()) = ...` — the error value
    // is intentionally ignored because we fall through to the next strategy.
    if restart_via_parent_signal(parent_pid).await.is_ok() {
        tracing::info!("Restart initiated via parent process signal");
        return Ok(());
    }
    if restart_via_process_replacement().await.is_ok() {
        tracing::info!("Restart initiated via process replacement");
        return Ok(());
    }
    restart_via_script().await
}
async fn get_parent_process_id(pid: u32) -> Result<u32> {
#[cfg(target_os = "linux")]
{
let stat_path = format!("/proc/{}/stat", pid);
if let Ok(ppid) = tokio::task::spawn_blocking(move || -> Result<u32> {
let content = std::fs::read_to_string(&stat_path)
.map_err(|e| Error::generic(format!("Failed to read {}: {}", stat_path, e)))?;
let fields: Vec<&str> = content.split_whitespace().collect();
if fields.len() > 3 {
fields[3]
.parse::<u32>()
.map_err(|e| Error::generic(format!("Failed to parse PPID: {}", e)))
} else {
Err(Error::generic("Insufficient fields in /proc/pid/stat".to_string()))
}
})
.await
{
return ppid;
}
}
Ok(1) }
async fn restart_via_parent_signal(parent_pid: u32) -> Result<()> {
#[cfg(unix)]
{
use std::process::Command;
let output = Command::new("kill")
.args(["-TERM", &parent_pid.to_string()])
.output()
.map_err(|e| Error::generic(format!("Failed to send signal: {}", e)))?;
if !output.status.success() {
return Err(Error::generic(
"Failed to send restart signal to parent process".to_string(),
));
}
tokio::time::sleep(Duration::from_millis(100)).await;
Ok(())
}
#[cfg(not(unix))]
{
Err(Error::generic(
"Signal-based restart not supported on this platform".to_string(),
))
}
}
/// Restart by spawning a fresh copy of the current executable with the same
/// arguments, then exiting this process once the child is confirmed running.
///
/// Note: on the happy path this function does NOT return — it calls
/// `std::process::exit(0)` after the replacement process is up.
async fn restart_via_process_replacement() -> Result<()> {
    let current_exe = std::env::current_exe()
        .map_err(|e| Error::generic(format!("Failed to get current executable: {}", e)))?;
    let args: Vec<String> = std::env::args().collect();
    tracing::info!("Restarting with command: {:?}", args);
    // BUG FIX: the spawn target was the garbled token `¤t_exe` (a mangled
    // `&current_exe`), which does not compile; spawn the real binary path.
    let mut child = Command::new(&current_exe)
        .args(&args[1..]) // skip argv[0]; the new process supplies its own
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .spawn()
        .map_err(|e| Error::generic(format!("Failed to start new process: {}", e)))?;
    // Short grace period so an immediately-failing child (bad args, port
    // conflicts, ...) is detected before we hand over.
    tokio::time::sleep(Duration::from_millis(500)).await;
    match child.try_wait() {
        Ok(Some(status)) => {
            if status.success() {
                tracing::info!("New process started successfully");
                Ok(())
            } else {
                Err(Error::generic("New process exited with error".to_string()))
            }
        }
        Ok(None) => {
            // Still running after the grace period: hand over and exit.
            tracing::info!("New process is running, exiting current process");
            std::process::exit(0);
        }
        Err(e) => Err(Error::generic(format!("Failed to check new process status: {}", e))),
    }
}
/// Last-resort restart strategy: run the first restart shell script found at
/// a known location. Before giving up, also runs the clear-ports script as a
/// best-effort cleanup so a manual restart is more likely to succeed.
async fn restart_via_script() -> Result<()> {
    const SCRIPT_PATHS: [&str; 3] = ["./scripts/restart.sh", "./restart.sh", "restart.sh"];
    for script_path in SCRIPT_PATHS {
        if !std::path::Path::new(script_path).exists() {
            continue;
        }
        tracing::info!("Using restart script: {}", script_path);
        let output = Command::new("bash")
            .arg(script_path)
            .output()
            .map_err(|e| Error::generic(format!("Failed to execute restart script: {}", e)))?;
        if output.status.success() {
            return Ok(());
        }
        // A failed script is logged and the next candidate is tried.
        tracing::warn!(
            "Restart script failed: {}",
            String::from_utf8_lossy(&output.stderr)
        );
    }
    let clear_script = "./scripts/clear-ports.sh";
    if std::path::Path::new(clear_script).exists() {
        tracing::info!("Using clear-ports script as fallback");
        let _ = Command::new("bash").arg(clear_script).output();
    }
    Err(Error::generic(
        "No restart mechanism available. Please restart manually.".to_string(),
    ))
}
/// GET handler: report the current restart state (e.g. whether a restart is
/// in progress).
pub async fn get_restart_status(
    State(state): State<AdminState>,
) -> Json<ApiResponse<RestartStatus>> {
    let status = state.get_restart_status().await;
    Json(ApiResponse::success(status))
}
pub async fn get_audit_logs(
Query(params): Query<HashMap<String, String>>,
) -> Json<ApiResponse<Vec<crate::audit::AdminAuditLog>>> {
use crate::audit::{get_global_audit_store, AdminActionType};
let action_type_str = params.get("action_type");
let user_id = params.get("user_id").map(|s| s.as_str());
let limit = params.get("limit").and_then(|s| s.parse::<usize>().ok());
let offset = params.get("offset").and_then(|s| s.parse::<usize>().ok());
let action_type = action_type_str.and_then(|s| {
match s.as_str() {
"config_latency_updated" => Some(AdminActionType::ConfigLatencyUpdated),
"config_faults_updated" => Some(AdminActionType::ConfigFaultsUpdated),
"server_restarted" => Some(AdminActionType::ServerRestarted),
"logs_cleared" => Some(AdminActionType::LogsCleared),
_ => None,
}
});
if let Some(audit_store) = get_global_audit_store() {
let logs = audit_store.get_logs(action_type, user_id, limit, offset).await;
Json(ApiResponse::success(logs))
} else {
Json(ApiResponse::error("Audit logging not initialized".to_string()))
}
}
pub async fn get_audit_stats() -> Json<ApiResponse<crate::audit::AuditLogStats>> {
use crate::audit::get_global_audit_store;
if let Some(audit_store) = get_global_audit_store() {
let stats = audit_store.get_stats().await;
Json(ApiResponse::success(stats))
} else {
Json(ApiResponse::error("Audit logging not initialized".to_string()))
}
}
/// GET handler: return the current runtime configuration as one JSON
/// document grouped by subsystem (latency, faults, proxy, traffic shaping,
/// validation).
pub async fn get_config(State(state): State<AdminState>) -> Json<ApiResponse<serde_json::Value>> {
    let config_state = state.get_config().await;
    let config = json!({
        "latency": {
            // NOTE(review): "enabled" is hard-coded to true here while the
            // other sections report a stored flag — confirm this is intended.
            "enabled": true,
            "base_ms": config_state.latency_profile.base_ms,
            "jitter_ms": config_state.latency_profile.jitter_ms,
            "tag_overrides": config_state.latency_profile.tag_overrides
        },
        "faults": {
            "enabled": config_state.fault_config.enabled,
            "failure_rate": config_state.fault_config.failure_rate,
            "status_codes": config_state.fault_config.status_codes
        },
        "proxy": {
            "enabled": config_state.proxy_config.enabled,
            "upstream_url": config_state.proxy_config.upstream_url,
            "timeout_seconds": config_state.proxy_config.timeout_seconds
        },
        "traffic_shaping": {
            "enabled": config_state.traffic_shaping.enabled,
            "bandwidth": config_state.traffic_shaping.bandwidth,
            "burst_loss": config_state.traffic_shaping.burst_loss
        },
        "validation": {
            "mode": config_state.validation_settings.mode,
            "aggregate_errors": config_state.validation_settings.aggregate_errors,
            "validate_responses": config_state.validation_settings.validate_responses,
            "overrides": config_state.validation_settings.overrides
        }
    });
    Json(ApiResponse::success(config))
}
/// Count all fixture files (`.json`) under the fixtures directory.
///
/// The root comes from `MOCKFORGE_FIXTURES_DIR` (default "fixtures") and
/// contains per-protocol subdirectories. A missing root yields `Ok(0)`
/// rather than an error.
pub fn count_fixtures() -> Result<usize> {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);
    if !fixtures_path.exists() {
        return Ok(0);
    }
    // One subtree per protocol; loop instead of repeating the scan three
    // times so a new protocol only needs a new entry here.
    let mut total_count = 0;
    for protocol in ["http", "websocket", "grpc"] {
        let protocol_path = fixtures_path.join(protocol);
        if protocol_path.exists() {
            total_count += count_fixtures_in_directory(&protocol_path)?;
        }
    }
    Ok(total_count)
}
fn count_fixtures_in_directory(dir_path: &std::path::Path) -> Result<usize> {
let mut count = 0;
if let Ok(entries) = std::fs::read_dir(dir_path) {
for entry in entries {
let entry = entry
.map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
let path = entry.path();
if path.is_dir() {
count += count_fixtures_in_directory(&path)?;
} else if path.extension().and_then(|s| s.to_str()) == Some("json") {
count += 1;
}
}
}
Ok(count)
}
/// Return true if at least one recorded fixture exists for `method` + `path`.
///
/// Fixtures live under `MOCKFORGE_FIXTURES_DIR` (default "fixtures"); the
/// route is encoded in the directory name by replacing '/' and ':' with '_'.
/// HTTP fixtures are looked up at `http/<method>/<encoded-path>/`, and for
/// the pseudo-method "WS" the `websocket/<encoded-path>/` tree is also
/// checked.
pub fn route_has_fixtures(method: &str, path: &str) -> bool {
    // Shared scan (deduplicated from the two near-identical loops this
    // function previously contained): does `dir` directly hold a *.json file?
    fn dir_contains_json(dir: &std::path::Path) -> bool {
        match std::fs::read_dir(dir) {
            Ok(entries) => entries
                .flatten()
                .any(|entry| entry.path().extension().and_then(|s| s.to_str()) == Some("json")),
            Err(_) => false,
        }
    }
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);
    if !fixtures_path.exists() {
        return false;
    }
    let method_lower = method.to_lowercase();
    let path_hash = path.replace(['/', ':'], "_");
    let http_dir = fixtures_path.join("http").join(&method_lower).join(&path_hash);
    if http_dir.exists() && dir_contains_json(&http_dir) {
        return true;
    }
    if method.to_uppercase() == "WS" {
        let ws_dir = fixtures_path.join("websocket").join(&path_hash);
        if ws_dir.exists() && dir_contains_json(&ws_dir) {
            return true;
        }
    }
    false
}
/// Mean response time (integer ms, truncating division) for `endpoint`, or
/// `None` when no samples have been recorded.
fn calculate_endpoint_latency(metrics: &RequestMetrics, endpoint: &str) -> Option<u64> {
    let times = metrics.response_times_by_endpoint.get(endpoint)?;
    if times.is_empty() {
        return None;
    }
    let total: u64 = times.iter().sum();
    Some(total / times.len() as u64)
}
/// Timestamp of the most recent request recorded for `endpoint`, if any.
fn get_endpoint_last_request(
    metrics: &RequestMetrics,
    endpoint: &str,
) -> Option<chrono::DateTime<Utc>> {
    metrics.last_request_by_endpoint.get(endpoint).copied()
}
/// Total request count attributed to one server type ("HTTP", "WebSocket"
/// or "gRPC"); any other value returns 0.
///
/// Endpoint keys are of the form "<METHOD> <path>" (the code below compares
/// the first space-separated token against method names), so the method
/// token identifies which protocol served the request.
fn count_requests_by_server_type(metrics: &RequestMetrics, server_type: &str) -> u64 {
    // One predicate instead of three copies of the same filter/map/sum
    // pipeline. Unknown server types match nothing, so the sum below is 0 —
    // same behavior as the original `_ => 0` arm.
    let method_matches = |method: &str| match server_type {
        "HTTP" => matches!(
            method,
            "GET" | "POST" | "PUT" | "DELETE" | "PATCH" | "HEAD" | "OPTIONS"
        ),
        "WebSocket" => method == "WS",
        "gRPC" => method == "gRPC",
        _ => false,
    };
    metrics
        .requests_by_endpoint
        .iter()
        .filter(|(endpoint, _)| method_matches(endpoint.split(' ').next().unwrap_or("")))
        .map(|(_, count)| count)
        .sum()
}
pub async fn get_fixtures() -> Json<ApiResponse<Vec<FixtureInfo>>> {
match scan_fixtures_directory() {
Ok(fixtures) => Json(ApiResponse::success(fixtures)),
Err(e) => {
tracing::error!("Failed to scan fixtures directory: {}", e);
Json(ApiResponse::error(format!("Failed to load fixtures: {}", e)))
}
}
}
/// Collect every fixture under the fixtures directory, newest first.
///
/// The root comes from `MOCKFORGE_FIXTURES_DIR` (default "fixtures"); a
/// missing root is treated as "no fixtures" rather than an error.
fn scan_fixtures_directory() -> Result<Vec<FixtureInfo>> {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);
    if !fixtures_path.exists() {
        tracing::warn!("Fixtures directory does not exist: {}", fixtures_dir);
        return Ok(Vec::new());
    }
    // One scan per protocol subtree; loop instead of three copy-pasted
    // scan/extend pairs so new protocols only need a new entry here.
    let mut all_fixtures = Vec::new();
    for protocol in ["http", "websocket", "grpc"] {
        all_fixtures.extend(scan_protocol_fixtures(fixtures_path, protocol)?);
    }
    // Newest first (descending save time).
    all_fixtures.sort_by(|a, b| b.saved_at.cmp(&a.saved_at));
    tracing::info!("Found {} fixtures in directory: {}", all_fixtures.len(), fixtures_dir);
    Ok(all_fixtures)
}
fn scan_protocol_fixtures(
fixtures_path: &std::path::Path,
protocol: &str,
) -> Result<Vec<FixtureInfo>> {
let protocol_path = fixtures_path.join(protocol);
let mut fixtures = Vec::new();
if !protocol_path.exists() {
return Ok(fixtures);
}
if let Ok(entries) = std::fs::read_dir(&protocol_path) {
for entry in entries {
let entry = entry
.map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
let path = entry.path();
if path.is_dir() {
let sub_fixtures = scan_directory_recursive(&path, protocol)?;
fixtures.extend(sub_fixtures);
} else if path.extension().and_then(|s| s.to_str()) == Some("json") {
if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
fixtures.push(fixture);
}
}
}
}
Ok(fixtures)
}
fn scan_directory_recursive(
dir_path: &std::path::Path,
protocol: &str,
) -> Result<Vec<FixtureInfo>> {
let mut fixtures = Vec::new();
if let Ok(entries) = std::fs::read_dir(dir_path) {
for entry in entries {
let entry = entry
.map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
let path = entry.path();
if path.is_dir() {
let sub_fixtures = scan_directory_recursive(&path, protocol)?;
fixtures.extend(sub_fixtures);
} else if path.extension().and_then(|s| s.to_str()) == Some("json") {
if let Ok(fixture) = parse_fixture_file_sync(&path, protocol) {
fixtures.push(fixture);
}
}
}
}
Ok(fixtures)
}
/// Build a `FixtureInfo` summary from one fixture JSON file on disk.
///
/// Reads file metadata (size, mtime -> `saved_at`), parses the JSON body,
/// derives method/path per protocol, and computes a content-based id plus a
/// fingerprint. Synchronous; async callers wrap it in `spawn_blocking`.
fn parse_fixture_file_sync(file_path: &std::path::Path, protocol: &str) -> Result<FixtureInfo> {
    let metadata = std::fs::metadata(file_path)
        .map_err(|e| Error::generic(format!("Failed to read file metadata: {}", e)))?;
    let file_size = metadata.len();
    let modified_time = metadata
        .modified()
        .map_err(|e| Error::generic(format!("Failed to get file modification time: {}", e)))?;
    // "saved_at" is the filesystem mtime, not a timestamp stored in the JSON.
    let saved_at = chrono::DateTime::from(modified_time);
    let content = std::fs::read_to_string(file_path)
        .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
    let fixture_data: serde_json::Value = serde_json::from_str(&content)
        .map_err(|e| Error::generic(format!("Failed to parse fixture JSON: {}", e)))?;
    let (method, path) = extract_method_and_path(&fixture_data, protocol)?;
    // The id hashes path AND content, so editing a fixture changes its id.
    let id = generate_fixture_id(file_path, &content);
    let fingerprint = extract_fingerprint(file_path, &fixture_data)?;
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);
    // Store the path relative to the fixtures root when possible; fall back
    // to the full path for files outside the root.
    let file_path_str = file_path
        .strip_prefix(fixtures_path)
        .unwrap_or(file_path)
        .to_string_lossy()
        .to_string();
    Ok(FixtureInfo {
        id,
        protocol: protocol.to_string(),
        method,
        path,
        saved_at,
        file_size,
        file_path: file_path_str,
        fingerprint,
        metadata: fixture_data,
    })
}
/// Derive a display (method, path) pair from raw fixture JSON according to
/// the fixture's protocol. Missing fields fall back to placeholder values;
/// as written, every arm returns `Ok` (the `Result` is kept for the
/// signature's sake).
fn extract_method_and_path(
    fixture_data: &serde_json::Value,
    protocol: &str,
) -> Result<(String, String)> {
    // Helper: read a string field nested under "request".
    let request_field = |name: &str| {
        fixture_data
            .get("request")
            .and_then(|req| req.get(name))
            .and_then(|v| v.as_str())
    };
    match protocol {
        "http" => {
            let method = request_field("method").unwrap_or("UNKNOWN").to_uppercase();
            let path = request_field("path").unwrap_or("/unknown").to_string();
            Ok((method, path))
        }
        "websocket" => {
            // Prefer a top-level "path"; fall back to request.path, then "/ws".
            let path = fixture_data
                .get("path")
                .and_then(|p| p.as_str())
                .or_else(|| request_field("path"))
                .unwrap_or("/ws")
                .to_string();
            Ok(("WS".to_string(), path))
        }
        "grpc" => {
            let service =
                fixture_data.get("service").and_then(|s| s.as_str()).unwrap_or("UnknownService");
            let method =
                fixture_data.get("method").and_then(|m| m.as_str()).unwrap_or("UnknownMethod");
            Ok(("gRPC".to_string(), format!("/{}/{}", service, method)))
        }
        _ => {
            let path = fixture_data
                .get("path")
                .and_then(|p| p.as_str())
                .unwrap_or("/unknown")
                .to_string();
            Ok((protocol.to_uppercase(), path))
        }
    }
}
/// Build a stable fixture identifier by hashing the file path and contents.
/// (Hashing the tuple feeds the hasher the same byte stream as hashing each
/// component in sequence, so ids match previously generated ones.)
fn generate_fixture_id(file_path: &std::path::Path, content: &str) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    let mut hasher = DefaultHasher::new();
    (file_path, content).hash(&mut hasher);
    format!("fixture_{:x}", hasher.finish())
}
/// Determine a fingerprint string for a fixture, in order of preference:
/// 1. an explicit "fingerprint" field in the fixture JSON,
/// 2. the trailing `_`-separated token of the file stem, when it looks like
///    a hash (>= 8 alphanumeric chars),
/// 3. a hash of the file path as a last resort.
fn extract_fingerprint(
    file_path: &std::path::Path,
    fixture_data: &serde_json::Value,
) -> Result<String> {
    if let Some(fingerprint) = fixture_data.get("fingerprint").and_then(|f| f.as_str()) {
        return Ok(fingerprint.to_string());
    }
    // Appears to assume file names of the form "<something>_<hash>.json".
    if let Some(file_name) = file_path.file_stem().and_then(|s| s.to_str()) {
        if let Some(hash) = file_name.split('_').next_back() {
            // NOTE(review): is_alphanumeric accepts any Unicode alphanumerics,
            // not just hex digits — confirm whether is_ascii_hexdigit was meant.
            if hash.len() >= 8 && hash.chars().all(|c| c.is_alphanumeric()) {
                return Ok(hash.to_string());
            }
        }
    }
    // Fallback: derive a stable pseudo-fingerprint from the path alone.
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    let mut hasher = DefaultHasher::new();
    file_path.hash(&mut hasher);
    Ok(format!("{:x}", hasher.finish()))
}
pub async fn delete_fixture(
Json(payload): Json<FixtureDeleteRequest>,
) -> Json<ApiResponse<String>> {
match delete_fixture_by_id(&payload.fixture_id).await {
Ok(_) => {
tracing::info!("Successfully deleted fixture: {}", payload.fixture_id);
Json(ApiResponse::success("Fixture deleted successfully".to_string()))
}
Err(e) => {
tracing::error!("Failed to delete fixture {}: {}", payload.fixture_id, e);
Json(ApiResponse::error(format!("Failed to delete fixture: {}", e)))
}
}
}
/// DELETE handler: remove several fixtures in one call.
///
/// Deletion is attempted for every id even when some fail; the response is
/// a success (with counts) only when every requested fixture was removed,
/// otherwise an error summarizing how many succeeded/failed.
pub async fn delete_fixtures_bulk(
    Json(payload): Json<FixtureBulkDeleteRequest>,
) -> Json<ApiResponse<FixtureBulkDeleteResult>> {
    let mut deleted_count = 0;
    let mut errors = Vec::new();
    for fixture_id in &payload.fixture_ids {
        match delete_fixture_by_id(fixture_id).await {
            Ok(()) => {
                deleted_count += 1;
                tracing::info!("Successfully deleted fixture: {}", fixture_id);
            }
            Err(e) => {
                errors.push(format!("Failed to delete {}: {}", fixture_id, e));
                tracing::error!("Failed to delete fixture {}: {}", fixture_id, e);
            }
        }
    }
    if errors.is_empty() {
        Json(ApiResponse::success(FixtureBulkDeleteResult {
            deleted_count,
            total_requested: payload.fixture_ids.len(),
            errors,
        }))
    } else {
        Json(ApiResponse::error(format!(
            "Partial success: {} deleted, {} errors",
            deleted_count,
            errors.len()
        )))
    }
}
async fn delete_fixture_by_id(fixture_id: &str) -> Result<()> {
let fixtures_dir =
std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
let fixtures_path = std::path::Path::new(&fixtures_dir);
if !fixtures_path.exists() {
return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
}
let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
let file_path_clone = file_path.clone();
tokio::task::spawn_blocking(move || {
if file_path_clone.exists() {
std::fs::remove_file(&file_path_clone).map_err(|e| {
Error::generic(format!(
"Failed to delete fixture file {}: {}",
file_path_clone.display(),
e
))
})
} else {
Err(Error::generic(format!("Fixture file not found: {}", file_path_clone.display())))
}
})
.await
.map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
tracing::info!("Deleted fixture file: {}", file_path.display());
cleanup_empty_directories(&file_path).await;
Ok(())
}
fn find_fixture_file_by_id(
fixtures_path: &std::path::Path,
fixture_id: &str,
) -> Result<std::path::PathBuf> {
let protocols = ["http", "websocket", "grpc"];
for protocol in &protocols {
let protocol_path = fixtures_path.join(protocol);
if let Ok(found_path) = search_fixture_in_directory(&protocol_path, fixture_id) {
return Ok(found_path);
}
}
Err(Error::generic(format!(
"Fixture with ID '{}' not found in any protocol directory",
fixture_id
)))
}
fn search_fixture_in_directory(
dir_path: &std::path::Path,
fixture_id: &str,
) -> Result<std::path::PathBuf> {
if !dir_path.exists() {
return Err(Error::generic(format!("Directory does not exist: {}", dir_path.display())));
}
if let Ok(entries) = std::fs::read_dir(dir_path) {
for entry in entries {
let entry = entry
.map_err(|e| Error::generic(format!("Failed to read directory entry: {}", e)))?;
let path = entry.path();
if path.is_dir() {
if let Ok(found_path) = search_fixture_in_directory(&path, fixture_id) {
return Ok(found_path);
}
} else if path.extension().and_then(|s| s.to_str()) == Some("json") {
if let Ok(fixture_info) = parse_fixture_file_sync(&path, "unknown") {
if fixture_info.id == fixture_id {
return Ok(path);
}
}
}
}
}
Err(Error::generic(format!(
"Fixture not found in directory: {}",
dir_path.display()
)))
}
/// After deleting a fixture file, walk up from its parent directory and
/// remove directories that have become empty, stopping at the fixtures root.
///
/// Purely best-effort: every failure is logged at debug level and ignored
/// (note the discarded `spawn_blocking` result).
async fn cleanup_empty_directories(file_path: &std::path::Path) {
    let file_path = file_path.to_path_buf();
    let _ = tokio::task::spawn_blocking(move || {
        if let Some(parent) = file_path.parent() {
            let mut current = parent;
            let fixtures_dir =
                std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
            let fixtures_path = std::path::Path::new(&fixtures_dir);
            // NOTE(review): `current != fixtures_path` is a lexical comparison;
            // if `file_path` is absolute while the fixtures dir is relative (or
            // vice versa) the root is never matched and only the
            // `parent().is_some()` guard bounds the walk — confirm callers
            // always pass paths rooted the same way.
            while current != fixtures_path && current.parent().is_some() {
                if let Ok(entries) = std::fs::read_dir(current) {
                    // `count()` consumes the iterator; 0 means fully empty.
                    if entries.count() == 0 {
                        if let Err(e) = std::fs::remove_dir(current) {
                            tracing::debug!(
                                "Failed to remove empty directory {}: {}",
                                current.display(),
                                e
                            );
                            break;
                        } else {
                            tracing::debug!("Removed empty directory: {}", current.display());
                        }
                    } else {
                        // A non-empty directory means nothing above it can
                        // become empty either, so stop the walk.
                        break;
                    }
                } else {
                    break;
                }
                if let Some(next_parent) = current.parent() {
                    current = next_parent;
                } else {
                    break;
                }
            }
        }
    })
    .await;
}
/// GET handler: return a fixture's raw JSON as a file download, or a JSON
/// error body with 404 when the fixture cannot be found or read.
pub async fn download_fixture(Path(fixture_id): Path<String>) -> impl IntoResponse {
    match download_fixture_by_id(&fixture_id).await {
        Ok((content, file_name)) => {
            // Serve as an attachment so browsers save instead of rendering.
            let headers = [
                (http::header::CONTENT_TYPE, "application/json".to_string()),
                (
                    http::header::CONTENT_DISPOSITION,
                    format!("attachment; filename=\"{}\"", file_name),
                ),
            ];
            (StatusCode::OK, headers, content).into_response()
        }
        Err(e) => {
            tracing::error!("Failed to download fixture {}: {}", fixture_id, e);
            let body = format!(r#"{{"error": "Failed to download fixture: {}"}}"#, e);
            (
                StatusCode::NOT_FOUND,
                [(http::header::CONTENT_TYPE, "application/json".to_string())],
                body,
            )
                .into_response()
        }
    }
}
/// Read a fixture's contents and derive a download file name, returning
/// `(content, file_name)`. File I/O runs on the blocking thread pool.
async fn download_fixture_by_id(fixture_id: &str) -> Result<(String, String)> {
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);
    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }
    let file_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
    let source = file_path.clone();
    let (content, file_name) = tokio::task::spawn_blocking(move || -> Result<(String, String)> {
        let content = std::fs::read_to_string(&source)
            .map_err(|e| Error::generic(format!("Failed to read fixture file: {}", e)))?;
        // Fall back to a generic name for paths with no readable file name.
        let file_name = source
            .file_name()
            .and_then(|name| name.to_str())
            .unwrap_or("fixture.json")
            .to_string();
        Ok((content, file_name))
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
    tracing::info!("Downloaded fixture file: {} ({} bytes)", file_path.display(), content.len());
    Ok((content, file_name))
}
pub async fn rename_fixture(
Path(fixture_id): Path<String>,
Json(payload): Json<FixtureRenameRequest>,
) -> Json<ApiResponse<String>> {
match rename_fixture_by_id(&fixture_id, &payload.new_name).await {
Ok(new_path) => {
tracing::info!("Successfully renamed fixture: {} -> {}", fixture_id, payload.new_name);
Json(ApiResponse::success(format!("Fixture renamed successfully to: {}", new_path)))
}
Err(e) => {
tracing::error!("Failed to rename fixture {}: {}", fixture_id, e);
Json(ApiResponse::error(format!("Failed to rename fixture: {}", e)))
}
}
}
async fn rename_fixture_by_id(fixture_id: &str, new_name: &str) -> Result<String> {
if new_name.is_empty() {
return Err(Error::generic("New name cannot be empty".to_string()));
}
let new_name = if new_name.ends_with(".json") {
new_name.to_string()
} else {
format!("{}.json", new_name)
};
let fixtures_dir =
std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
let fixtures_path = std::path::Path::new(&fixtures_dir);
if !fixtures_path.exists() {
return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
}
let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
let parent = old_path
.parent()
.ok_or_else(|| Error::generic("Could not determine parent directory".to_string()))?;
let new_path = parent.join(&new_name);
if new_path.exists() {
return Err(Error::generic(format!(
"A fixture with name '{}' already exists in the same directory",
new_name
)));
}
let old_path_clone = old_path.clone();
let new_path_clone = new_path.clone();
tokio::task::spawn_blocking(move || {
std::fs::rename(&old_path_clone, &new_path_clone)
.map_err(|e| Error::generic(format!("Failed to rename fixture file: {}", e)))
})
.await
.map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
tracing::info!("Renamed fixture file: {} -> {}", old_path.display(), new_path.display());
Ok(new_path
.strip_prefix(fixtures_path)
.unwrap_or(&new_path)
.to_string_lossy()
.to_string())
}
pub async fn move_fixture(
Path(fixture_id): Path<String>,
Json(payload): Json<FixtureMoveRequest>,
) -> Json<ApiResponse<String>> {
match move_fixture_by_id(&fixture_id, &payload.new_path).await {
Ok(new_location) => {
tracing::info!("Successfully moved fixture: {} -> {}", fixture_id, payload.new_path);
Json(ApiResponse::success(format!("Fixture moved successfully to: {}", new_location)))
}
Err(e) => {
tracing::error!("Failed to move fixture {}: {}", fixture_id, e);
Json(ApiResponse::error(format!("Failed to move fixture: {}", e)))
}
}
}
/// Move the fixture identified by `fixture_id` to `new_path` (interpreted
/// relative to the fixtures root), creating target directories as needed.
/// Returns the destination path relative to the fixtures root.
///
/// Destination resolution:
/// * a path ending in `.json` is used as-is;
/// * an existing directory, or a path containing no '.', keeps the original
///   file name inside that directory;
/// * otherwise the extension is forced to `.json`.
///
/// NOTE(review): `new_path` comes from an HTTP request body and is not
/// checked for ".." components, so it may resolve outside the fixtures root
/// — confirm whether traversal should be rejected here.
async fn move_fixture_by_id(fixture_id: &str, new_path: &str) -> Result<String> {
    if new_path.is_empty() {
        return Err(Error::generic("New path cannot be empty".to_string()));
    }
    let fixtures_dir =
        std::env::var("MOCKFORGE_FIXTURES_DIR").unwrap_or_else(|_| "fixtures".to_string());
    let fixtures_path = std::path::Path::new(&fixtures_dir);
    if !fixtures_path.exists() {
        return Err(Error::generic(format!("Fixtures directory does not exist: {}", fixtures_dir)));
    }
    let old_path = find_fixture_file_by_id(fixtures_path, fixture_id)?;
    // A leading '/' means "root of the fixtures tree", not filesystem root.
    let new_full_path = if new_path.starts_with('/') {
        fixtures_path.join(new_path.trim_start_matches('/'))
    } else {
        fixtures_path.join(new_path)
    };
    let new_full_path = if new_full_path.extension().and_then(|s| s.to_str()) == Some("json") {
        new_full_path
    } else {
        if new_full_path.is_dir() || !new_path.contains('.') {
            // Treat the destination as a directory: keep the original name.
            let file_name = old_path.file_name().ok_or_else(|| {
                Error::generic("Could not determine original file name".to_string())
            })?;
            new_full_path.join(file_name)
        } else {
            // Has some other extension: force .json (replaces the extension).
            new_full_path.with_extension("json")
        }
    };
    // Refuse to clobber an existing fixture.
    if new_full_path.exists() {
        return Err(Error::generic(format!(
            "A fixture already exists at path: {}",
            new_full_path.display()
        )));
    }
    // Blocking mkdir + rename run on the blocking thread pool.
    let old_path_clone = old_path.clone();
    let new_full_path_clone = new_full_path.clone();
    tokio::task::spawn_blocking(move || {
        if let Some(parent) = new_full_path_clone.parent() {
            std::fs::create_dir_all(parent)
                .map_err(|e| Error::generic(format!("Failed to create target directory: {}", e)))?;
        }
        std::fs::rename(&old_path_clone, &new_full_path_clone)
            .map_err(|e| Error::generic(format!("Failed to move fixture file: {}", e)))
    })
    .await
    .map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
    tracing::info!("Moved fixture file: {} -> {}", old_path.display(), new_full_path.display());
    cleanup_empty_directories(&old_path).await;
    Ok(new_full_path
        .strip_prefix(fixtures_path)
        .unwrap_or(&new_full_path)
        .to_string_lossy()
        .to_string())
}
/// GET handler: return the current request/response validation settings.
pub async fn get_validation(
    State(state): State<AdminState>,
) -> Json<ApiResponse<ValidationSettings>> {
    let config_state = state.get_config().await;
    Json(ApiResponse::success(config_state.validation_settings))
}
/// PUT handler: replace the request/response validation settings.
///
/// `mode` must be one of "enforce", "warn" or "off"; any other value is
/// rejected without touching the current configuration.
pub async fn update_validation(
    State(state): State<AdminState>,
    Json(update): Json<ValidationUpdate>,
) -> Json<ApiResponse<String>> {
    if !matches!(update.mode.as_str(), "enforce" | "warn" | "off") {
        return Json(ApiResponse::error(
            "Invalid validation mode. Must be 'enforce', 'warn', or 'off'".to_string(),
        ));
    }
    // Keep a copy for logging; `update.mode` is moved into the call below.
    let mode = update.mode.clone();
    state
        .update_validation_config(
            update.mode,
            update.aggregate_errors,
            update.validate_responses,
            update.overrides.unwrap_or_default(),
        )
        .await;
    tracing::info!(
        "Updated validation settings: mode={}, aggregate_errors={}",
        mode,
        update.aggregate_errors
    );
    Json(ApiResponse::success("Validation settings updated".to_string()))
}
/// GET handler: report the values of MockForge-relevant environment
/// variables that are currently set. Unset variables are omitted.
pub async fn get_env_vars() -> Json<ApiResponse<HashMap<String, String>>> {
    // Allow-list of variables surfaced to the admin UI; extend as new
    // configuration knobs are added.
    const RELEVANT_VARS: &[&str] = &[
        "MOCKFORGE_LATENCY_ENABLED",
        "MOCKFORGE_FAILURES_ENABLED",
        "MOCKFORGE_PROXY_ENABLED",
        "MOCKFORGE_RECORD_ENABLED",
        "MOCKFORGE_REPLAY_ENABLED",
        "MOCKFORGE_LOG_LEVEL",
        "MOCKFORGE_CONFIG_FILE",
        "RUST_LOG",
        "MOCKFORGE_HTTP_PORT",
        "MOCKFORGE_HTTP_HOST",
        "MOCKFORGE_HTTP_OPENAPI_SPEC",
        "MOCKFORGE_CORS_ENABLED",
        "MOCKFORGE_REQUEST_TIMEOUT_SECS",
        "MOCKFORGE_WS_PORT",
        "MOCKFORGE_WS_HOST",
        "MOCKFORGE_WS_REPLAY_FILE",
        "MOCKFORGE_WS_CONNECTION_TIMEOUT_SECS",
        "MOCKFORGE_GRPC_PORT",
        "MOCKFORGE_GRPC_HOST",
        "MOCKFORGE_ADMIN_ENABLED",
        "MOCKFORGE_ADMIN_PORT",
        "MOCKFORGE_ADMIN_HOST",
        "MOCKFORGE_ADMIN_MOUNT_PATH",
        "MOCKFORGE_ADMIN_API_ENABLED",
        "MOCKFORGE_RESPONSE_TEMPLATE_EXPAND",
        "MOCKFORGE_REQUEST_VALIDATION",
        "MOCKFORGE_AGGREGATE_ERRORS",
        "MOCKFORGE_RESPONSE_VALIDATION",
        "MOCKFORGE_VALIDATION_STATUS",
        "MOCKFORGE_RAG_ENABLED",
        "MOCKFORGE_FAKE_TOKENS",
        "MOCKFORGE_FIXTURES_DIR",
    ];
    let env_vars: HashMap<String, String> = RELEVANT_VARS
        .iter()
        .filter_map(|name| std::env::var(name).ok().map(|value| (name.to_string(), value)))
        .collect();
    Json(ApiResponse::success(env_vars))
}
/// Set a process environment variable to the requested value.
///
/// NOTE(review): `std::env::set_var` mutates process-global state and is not
/// thread-safe on all platforms; this server runs on a multi-threaded
/// executor — confirm this trade-off is acceptable. The change is also not
/// persisted across restarts (the response says as much).
pub async fn update_env_var(Json(update): Json<EnvVarUpdate>) -> Json<ApiResponse<String>> {
    std::env::set_var(&update.key, &update.value);
    tracing::info!("Updated environment variable: {}={}", update.key, update.value);
    let message = format!(
        "Environment variable {} updated to '{}'. Note: This change is not persisted and will be lost on restart.",
        update.key, update.value
    );
    Json(ApiResponse::success(message))
}
pub async fn get_file_content(
Json(request): Json<FileContentRequest>,
) -> Json<ApiResponse<String>> {
if let Err(e) = validate_file_path(&request.file_path) {
return Json(ApiResponse::error(format!("Invalid file path: {}", e)));
}
match tokio::fs::read_to_string(&request.file_path).await {
Ok(content) => {
if let Err(e) = validate_file_content(&content) {
return Json(ApiResponse::error(format!("Invalid file content: {}", e)));
}
Json(ApiResponse::success(content))
}
Err(e) => Json(ApiResponse::error(format!("Failed to read file: {}", e))),
}
}
pub async fn save_file_content(Json(request): Json<FileSaveRequest>) -> Json<ApiResponse<String>> {
match save_file_to_filesystem(&request.file_path, &request.content).await {
Ok(_) => {
tracing::info!("Successfully saved file: {}", request.file_path);
Json(ApiResponse::success("File saved successfully".to_string()))
}
Err(e) => {
tracing::error!("Failed to save file {}: {}", request.file_path, e);
Json(ApiResponse::error(format!("Failed to save file: {}", e)))
}
}
}
async fn save_file_to_filesystem(file_path: &str, content: &str) -> Result<()> {
validate_file_path(file_path)?;
validate_file_content(content)?;
let path = std::path::PathBuf::from(file_path);
let content = content.to_string();
let path_clone = path.clone();
let content_clone = content.clone();
tokio::task::spawn_blocking(move || {
if let Some(parent) = path_clone.parent() {
std::fs::create_dir_all(parent).map_err(|e| {
Error::generic(format!("Failed to create directory {}: {}", parent.display(), e))
})?;
}
std::fs::write(&path_clone, &content_clone).map_err(|e| {
Error::generic(format!("Failed to write file {}: {}", path_clone.display(), e))
})?;
let written_content = std::fs::read_to_string(&path_clone).map_err(|e| {
Error::generic(format!("Failed to verify written file {}: {}", path_clone.display(), e))
})?;
if written_content != content_clone {
return Err(Error::generic(format!(
"File content verification failed for {}",
path_clone.display()
)));
}
Ok::<_, Error>(())
})
.await
.map_err(|e| Error::generic(format!("Task join error: {}", e)))??;
tracing::info!("File saved successfully: {} ({} bytes)", path.display(), content.len());
Ok(())
}
/// Validate a user-supplied file path before any filesystem access.
///
/// Rejects paths containing `..` (traversal), absolute paths outside the
/// process working directory, and dangerous executable extensions.
///
/// # Errors
/// Returns a descriptive error for each rejected category.
fn validate_file_path(file_path: &str) -> Result<()> {
    // Blanket rejection of parent-directory markers anywhere in the string.
    // (Also rejects benign names like "file..txt" — intentionally strict.)
    if file_path.contains("..") {
        return Err(Error::generic("Path traversal detected in file path".to_string()));
    }
    let path = std::path::Path::new(file_path);
    if path.is_absolute() {
        // Fix: the original allowlist also contained the relative entries
        // ".", "fixtures", and "config", which an absolute path can never
        // `starts_with` — those entries were dead code and have been removed.
        // Only the current working directory check ever took effect.
        let cwd = std::env::current_dir().unwrap_or_default();
        if !path.starts_with(&cwd) {
            return Err(Error::generic("File path is outside allowed directories".to_string()));
        }
    }
    // Block obviously executable/script extensions regardless of location.
    const DANGEROUS_EXTENSIONS: [&str; 7] = ["exe", "bat", "cmd", "sh", "ps1", "scr", "com"];
    if let Some(extension) = path.extension().and_then(|ext| ext.to_str()) {
        if DANGEROUS_EXTENSIONS.contains(&extension.to_lowercase().as_str()) {
            return Err(Error::generic(format!(
                "Dangerous file extension not allowed: {}",
                extension
            )));
        }
    }
    Ok(())
}
/// Validate file content before writing or returning it: enforce a 10 MB
/// size cap (in bytes) and reject embedded NUL bytes.
fn validate_file_content(content: &str) -> Result<()> {
    const MAX_CONTENT_BYTES: usize = 10 * 1024 * 1024;
    if content.len() > MAX_CONTENT_BYTES {
        return Err(Error::generic("File content too large (max 10MB)".to_string()));
    }
    if content.contains('\0') {
        return Err(Error::generic("File content contains null bytes".to_string()));
    }
    Ok(())
}
/// Request body for deleting a single fixture by its identifier.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureDeleteRequest {
    pub fixture_id: String,
}
/// Request body for setting a process environment variable (key/value pair).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvVarUpdate {
    pub key: String,
    pub value: String,
}
/// Request body for deleting several fixtures in one call.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteRequest {
    pub fixture_ids: Vec<String>,
}
/// Outcome of a bulk fixture delete: how many succeeded out of how many were
/// requested, plus per-failure error messages.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureBulkDeleteResult {
    pub deleted_count: usize,
    pub total_requested: usize,
    pub errors: Vec<String>,
}
/// Request body for renaming a fixture file in place.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureRenameRequest {
    pub new_name: String,
}
/// Request body for moving a fixture to a new path under the fixtures root.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FixtureMoveRequest {
    pub new_path: String,
}
/// Request body for reading a file's content from disk.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileContentRequest {
    pub file_path: String,
    // NOTE(review): `file_type` is accepted but not consulted by
    // `get_file_content` — confirm whether it is used elsewhere.
    pub file_type: String,
}
/// Request body for writing content to a file on disk.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileSaveRequest {
    pub file_path: String,
    pub content: String,
}
/// Return the most recent smoke-test results held in admin state.
pub async fn get_smoke_tests(
    State(state): State<AdminState>,
) -> Json<ApiResponse<Vec<SmokeTestResult>>> {
    Json(ApiResponse::success(state.get_smoke_test_results().await))
}
pub async fn run_smoke_tests_endpoint(
State(state): State<AdminState>,
) -> Json<ApiResponse<String>> {
tracing::info!("Starting smoke test execution");
let state_clone = state.clone();
tokio::spawn(async move {
if let Err(e) = execute_smoke_tests(&state_clone).await {
tracing::error!("Smoke test execution failed: {}", e);
} else {
tracing::info!("Smoke test execution completed successfully");
}
});
Json(ApiResponse::success(
"Smoke tests started. Check results in the smoke tests section.".to_string(),
))
}
async fn execute_smoke_tests(state: &AdminState) -> Result<()> {
let base_url =
std::env::var("MOCKFORGE_BASE_URL").unwrap_or_else(|_| "http://localhost:3000".to_string());
let context = SmokeTestContext {
base_url,
timeout_seconds: 30,
parallel: true,
};
let fixtures = scan_fixtures_directory()?;
let http_fixtures: Vec<&FixtureInfo> =
fixtures.iter().filter(|f| f.protocol == "http").collect();
if http_fixtures.is_empty() {
tracing::warn!("No HTTP fixtures found for smoke testing");
return Ok(());
}
tracing::info!("Running smoke tests for {} HTTP fixtures", http_fixtures.len());
let mut test_results = Vec::new();
for fixture in http_fixtures {
let test_result = create_smoke_test_from_fixture(fixture);
test_results.push(test_result);
}
let mut executed_results = Vec::new();
for mut test_result in test_results {
test_result.status = "running".to_string();
state.update_smoke_test_result(test_result.clone()).await;
let start_time = std::time::Instant::now();
match execute_single_smoke_test(&test_result, &context).await {
Ok((status_code, response_time_ms)) => {
test_result.status = "passed".to_string();
test_result.status_code = Some(status_code);
test_result.response_time_ms = Some(response_time_ms);
test_result.error_message = None;
}
Err(e) => {
test_result.status = "failed".to_string();
test_result.error_message = Some(e.to_string());
test_result.status_code = None;
test_result.response_time_ms = None;
}
}
let duration = start_time.elapsed();
test_result.duration_seconds = Some(duration.as_secs_f64());
test_result.last_run = Some(Utc::now());
executed_results.push(test_result.clone());
state.update_smoke_test_result(test_result).await;
}
tracing::info!("Smoke test execution completed: {} tests run", executed_results.len());
Ok(())
}
/// Build a pending smoke-test record from an HTTP fixture's method and path.
fn create_smoke_test_from_fixture(fixture: &FixtureInfo) -> SmokeTestResult {
    SmokeTestResult {
        id: format!("smoke_{}", fixture.id),
        name: format!("{} {}", fixture.method, fixture.path),
        method: fixture.method.clone(),
        path: fixture.path.clone(),
        description: format!("Smoke test for {} endpoint", fixture.path),
        last_run: None,
        status: "pending".to_string(),
        response_time_ms: None,
        error_message: None,
        status_code: None,
        duration_seconds: None,
    }
}
async fn execute_single_smoke_test(
test: &SmokeTestResult,
context: &SmokeTestContext,
) -> Result<(u16, u64)> {
let url = format!("{}{}", context.base_url, test.path);
let client = reqwest::Client::builder()
.timeout(Duration::from_secs(context.timeout_seconds))
.build()
.map_err(|e| Error::generic(format!("Failed to create HTTP client: {}", e)))?;
let start_time = std::time::Instant::now();
let response = match test.method.as_str() {
"GET" => client.get(&url).send().await,
"POST" => client.post(&url).send().await,
"PUT" => client.put(&url).send().await,
"DELETE" => client.delete(&url).send().await,
"PATCH" => client.patch(&url).send().await,
"HEAD" => client.head(&url).send().await,
"OPTIONS" => client.request(reqwest::Method::OPTIONS, &url).send().await,
_ => {
return Err(Error::generic(format!("Unsupported HTTP method: {}", test.method)));
}
};
let response_time = start_time.elapsed();
let response_time_ms = response_time.as_millis() as u64;
match response {
Ok(resp) => {
let status_code = resp.status().as_u16();
if (200..400).contains(&status_code) {
Ok((status_code, response_time_ms))
} else {
Err(Error::generic(format!(
"HTTP error: {} {}",
status_code,
resp.status().canonical_reason().unwrap_or("Unknown")
)))
}
}
Err(e) => Err(Error::generic(format!("Request failed: {}", e))),
}
}
/// Request body for installing a plugin via the admin API.
#[derive(Debug, Deserialize)]
pub struct PluginInstallRequest {
    // Source string: local path, URL, git reference, or registry name
    // (parsed by `PluginSource::parse`).
    pub source: String,
    // When true, an already-installed plugin with the same id is replaced.
    #[serde(default)]
    pub force: bool,
    // Accepted for API compatibility but rejected by `install_plugin`.
    #[serde(default)]
    pub skip_validation: bool,
    // Accepted for API compatibility but rejected by `install_plugin`.
    #[serde(default)]
    pub no_verify: bool,
    // Optional expected checksum for URL downloads.
    pub checksum: Option<String>,
}
/// Request body for validating a plugin source without installing it.
#[derive(Debug, Deserialize)]
pub struct PluginValidateRequest {
    pub source: String,
}
/// Locate a directory containing a `plugin.yaml` manifest: first the given
/// path itself, then its immediate children (one level deep, no recursion).
/// Returns `None` when nothing matches or the directory cannot be read.
fn find_plugin_directory(path: &std::path::Path) -> Option<std::path::PathBuf> {
    if path.join("plugin.yaml").exists() {
        return Some(path.to_path_buf());
    }
    std::fs::read_dir(path)
        .ok()?
        .filter_map(|entry| entry.ok())
        .map(|entry| entry.path())
        .find(|child| child.is_dir() && child.join("plugin.yaml").exists())
}
/// Resolve a parsed plugin source to a local filesystem path, downloading or
/// cloning when the source is remote. Registry sources are not supported yet
/// and produce an error string describing the requested name/version.
async fn resolve_plugin_source_path(
    source: PluginSource,
    checksum: Option<&str>,
) -> std::result::Result<std::path::PathBuf, String> {
    match source {
        // Already on disk — nothing to fetch.
        PluginSource::Local(path) => Ok(path),
        // Download over HTTP(S), optionally verified against `checksum`.
        PluginSource::Url { url, .. } => RemotePluginLoader::new(RemotePluginConfig::default())
            .map_err(|e| format!("Failed to initialize remote plugin loader: {}", e))?
            .download_with_checksum(&url, checksum)
            .await
            .map_err(|e| format!("Failed to download plugin from URL: {}", e)),
        // Clone the referenced repository locally.
        PluginSource::Git(git_source) => GitPluginLoader::new(GitPluginConfig::default())
            .map_err(|e| format!("Failed to initialize git plugin loader: {}", e))?
            .clone_from_git(&git_source)
            .await
            .map_err(|e| format!("Failed to clone plugin from git: {}", e)),
        PluginSource::Registry { name, version } => Err(format!(
            "Registry plugin installation is not yet supported from the admin API (requested {}@{})",
            name,
            version.unwrap_or_else(|| "latest".to_string())
        )),
    }
}
/// Install a plugin from a source string (local path, URL, or git reference)
/// and register it in the admin plugin registry.
///
/// `skip_validation` and `no_verify` are rejected outright; `force=true`
/// replaces an already-installed plugin with the same id. Responds with a
/// JSON envelope carrying `success` plus either `data` (id/name/version) or
/// `error`.
pub async fn install_plugin(
    State(state): State<AdminState>,
    Json(request): Json<PluginInstallRequest>,
) -> impl IntoResponse {
    let source = request.source.trim().to_string();
    if source.is_empty() {
        return Json(json!({
            "success": false,
            "error": "Plugin source is required"
        }));
    }
    // Hard-refuse requests that try to weaken validation or verification.
    if request.skip_validation {
        return Json(json!({
            "success": false,
            "error": "Skipping validation is not supported in admin install flow."
        }));
    }
    if request.no_verify {
        return Json(json!({
            "success": false,
            "error": "Disabling signature verification is not supported in admin install flow."
        }));
    }
    let force = request.force;
    let checksum = request.checksum.clone();
    let state_for_install = state.clone();
    // The whole install runs on a blocking thread: the registry is locked via
    // `blocking_write`, so this must not run on the async executor. Async
    // loader calls inside are driven by a dedicated current-thread runtime.
    let install_result = tokio::task::spawn_blocking(
        move || -> std::result::Result<(String, String, String), String> {
            let runtime = tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .map_err(|e| format!("Failed to initialize install runtime: {}", e))?;
            let (plugin_instance, plugin_id, plugin_name, plugin_version) =
                runtime.block_on(async move {
                    // Parse the source, fetch it locally, and find the
                    // directory actually containing plugin.yaml.
                    let parsed_source = PluginSource::parse(&source)
                        .map_err(|e| format!("Invalid plugin source: {}", e))?;
                    let source_path =
                        resolve_plugin_source_path(parsed_source, checksum.as_deref()).await?;
                    let plugin_root = if source_path.is_dir() {
                        find_plugin_directory(&source_path).unwrap_or(source_path.clone())
                    } else {
                        source_path.clone()
                    };
                    if !plugin_root.exists() || !plugin_root.is_dir() {
                        return Err(format!(
                            "Resolved plugin path is not a directory: {}",
                            plugin_root.display()
                        ));
                    }
                    // Validate the manifest before loading anything.
                    let loader = PluginLoader::new(PluginLoaderConfig::default());
                    let manifest = loader
                        .validate_plugin(&plugin_root)
                        .await
                        .map_err(|e| format!("Failed to validate plugin: {}", e))?;
                    let plugin_id = manifest.info.id.clone();
                    let plugin_name = manifest.info.name.clone();
                    let plugin_version = manifest.info.version.to_string();
                    // Load the plugin via a loader rooted at the plugin's
                    // parent directory so `load_plugin(id)` can find it.
                    let parent_dir = plugin_root
                        .parent()
                        .unwrap_or_else(|| std::path::Path::new("."))
                        .to_string_lossy()
                        .to_string();
                    let runtime_loader = PluginLoader::new(PluginLoaderConfig {
                        plugin_dirs: vec![parent_dir],
                        ..PluginLoaderConfig::default()
                    });
                    runtime_loader
                        .load_plugin(&plugin_id)
                        .await
                        .map_err(|e| format!("Failed to load plugin into runtime: {}", e))?;
                    let plugin_instance =
                        runtime_loader.get_plugin(&plugin_id).await.ok_or_else(|| {
                            "Plugin loaded but instance was not retrievable from loader".to_string()
                        })?;
                    Ok::<_, String>((
                        plugin_instance,
                        plugin_id.to_string(),
                        plugin_name,
                        plugin_version,
                    ))
                })?;
            // Register in the shared admin registry (blocking lock — see above).
            let mut registry = state_for_install.plugin_registry.blocking_write();
            if let Some(existing_id) =
                registry.list_plugins().into_iter().find(|id| id.as_str() == plugin_id)
            {
                if force {
                    registry.remove_plugin(&existing_id).map_err(|e| {
                        format!("Failed to remove existing plugin before reinstall: {}", e)
                    })?;
                } else {
                    return Err(format!(
                        "Plugin '{}' is already installed. Use force=true to reinstall.",
                        plugin_id
                    ));
                }
            }
            registry
                .add_plugin(plugin_instance)
                .map_err(|e| format!("Failed to register plugin in admin registry: {}", e))?;
            Ok((plugin_id, plugin_name, plugin_version))
        },
    )
    .await;
    // Flatten the JoinError / install error layers into a single response.
    let (plugin_id, plugin_name, plugin_version) = match install_result {
        Ok(Ok(result)) => result,
        Ok(Err(err)) => {
            return Json(json!({
                "success": false,
                "error": err
            }))
        }
        Err(err) => {
            return Json(json!({
                "success": false,
                "error": format!("Plugin installation task failed: {}", err)
            }))
        }
    };
    Json(json!({
        "success": true,
        "data": {
            "plugin_id": plugin_id,
            "name": plugin_name,
            "version": plugin_version
        },
        "message": "Plugin installed and registered in runtime."
    }))
}
/// Validate a plugin source (local path, URL, or git reference) without
/// installing it. Registry sources are rejected. Responds with a JSON
/// envelope describing validity and, on success, the manifest id/name/version.
pub async fn validate_plugin(Json(request): Json<PluginValidateRequest>) -> impl IntoResponse {
    let trimmed = request.source.trim();
    if trimmed.is_empty() {
        return Json(json!({
            "success": false,
            "error": "Plugin source is required"
        }));
    }
    let parsed = match PluginSource::parse(trimmed) {
        Ok(parsed) => parsed,
        Err(e) => {
            return Json(json!({
                "success": false,
                "error": format!("Invalid plugin source: {}", e)
            }));
        }
    };
    // Turn the source into a local path, fetching remote sources as needed.
    let resolved = match parsed.clone() {
        PluginSource::Local(path) => path,
        PluginSource::Registry { .. } => {
            return Json(json!({
                "success": false,
                "error": "Registry plugin validation is not yet supported from the admin API."
            }))
        }
        PluginSource::Url { .. } | PluginSource::Git(_) => {
            match resolve_plugin_source_path(parsed, None).await {
                Ok(path) => path,
                Err(err) => {
                    return Json(json!({
                        "success": false,
                        "error": err
                    }))
                }
            }
        }
    };
    // When pointed at a directory, prefer the subdirectory holding plugin.yaml.
    let plugin_root = if resolved.is_dir() {
        find_plugin_directory(&resolved).unwrap_or_else(|| resolved.clone())
    } else {
        resolved
    };
    let loader = PluginLoader::new(PluginLoaderConfig::default());
    match loader.validate_plugin(&plugin_root).await {
        Ok(manifest) => Json(json!({
            "success": true,
            "data": {
                "valid": true,
                "id": manifest.info.id.to_string(),
                "name": manifest.info.name,
                "version": manifest.info.version.to_string()
            }
        })),
        Err(e) => Json(json!({
            "success": false,
            "data": { "valid": false },
            "error": format!("Plugin validation failed: {}", e)
        })),
    }
}
/// Update traffic-shaping settings in the admin config and, when a chaos
/// engine is attached, mirror the relevant knobs into its config.
///
/// Burst-loss probabilities must lie in [0.0, 1.0].
pub async fn update_traffic_shaping(
    State(state): State<AdminState>,
    Json(config): Json<TrafficShapingConfig>,
) -> Json<ApiResponse<String>> {
    // Fix: use RangeInclusive::contains instead of paired </> comparisons —
    // identical for finite values, but this also rejects NaN, which the
    // original comparisons silently accepted.
    let valid = 0.0..=1.0;
    if !valid.contains(&config.burst_loss.burst_probability)
        || !valid.contains(&config.burst_loss.loss_rate_during_burst)
    {
        return Json(ApiResponse::error(
            "Burst loss probabilities must be between 0.0 and 1.0".to_string(),
        ));
    }
    {
        // Scope the write lock tightly around the config update.
        let mut cfg = state.config.write().await;
        cfg.traffic_shaping = config.clone();
    }
    if let Some(ref chaos_api_state) = state.chaos_api_state {
        let mut chaos_config = chaos_api_state.config.write().await;
        chaos_config.traffic_shaping = Some(mockforge_chaos::config::TrafficShapingConfig {
            enabled: config.enabled,
            bandwidth_limit_bps: config.bandwidth.max_bytes_per_sec,
            // Chaos config expects a percentage; admin config stores a fraction.
            packet_loss_percent: config.burst_loss.loss_rate_during_burst * 100.0,
            max_connections: 0,
            connection_timeout_ms: 30000,
        });
    }
    Json(ApiResponse::success("Traffic shaping updated".to_string()))
}
/// Import a Postman collection (request JSON fields: `content`, optional
/// `filename`, `environment`, `base_url`) into a new workspace, recording
/// every attempt — success or failure — in the import history.
pub async fn import_postman(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use mockforge_core::workspace_import::{import_postman_to_workspace, WorkspaceImportConfig};
    use uuid::Uuid;
    // Loosely-typed request: a missing `content` becomes the empty string.
    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());
    // Parse the raw collection; a parse failure is recorded in history too.
    let import_result = match mockforge_core::import::import_postman_collection(content, base_url) {
        Ok(result) => result,
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);
            return Json(ApiResponse::error(format!("Postman import failed: {}", e)));
        }
    };
    // Workspace name: the filename stem (text before the first '.') or a default.
    let workspace_name = filename
        .and_then(|f| f.split('.').next())
        .unwrap_or("Imported Postman Collection");
    let config = WorkspaceImportConfig {
        create_folders: true,
        base_folder_name: None,
        preserve_hierarchy: true,
        max_depth: 5,
    };
    // Re-shape parsed routes into the workspace-import route type.
    let routes: Vec<ImportRoute> = import_result
        .routes
        .into_iter()
        .map(|route| ImportRoute {
            method: route.method,
            path: route.path,
            headers: route.headers,
            body: route.body,
            response: ImportResponse {
                status: route.response.status,
                headers: route.response.headers,
                body: route.response.body,
            },
        })
        .collect();
    match import_postman_to_workspace(routes, workspace_name.to_string(), config) {
        Ok(workspace_result) => {
            // Persist the new workspace; a save failure still counts as failed.
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count: import_result.variables.len(),
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);
            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "postman".to_string(),
                timestamp: Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);
            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}
/// Import an Insomnia export (request JSON fields: `content`, optional
/// `filename`, `environment`, `base_url`) into a new workspace, recording
/// every attempt — success or failure — in the import history.
pub async fn import_insomnia(
    State(state): State<AdminState>,
    Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    use uuid::Uuid;
    // Loosely-typed request: a missing `content` becomes the empty string.
    let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
    let filename = request.get("filename").and_then(|v| v.as_str());
    let environment = request.get("environment").and_then(|v| v.as_str());
    let base_url = request.get("base_url").and_then(|v| v.as_str());
    // Parse the export; a parse failure is recorded in history too.
    let import_result = match mockforge_core::import::import_insomnia_export(content, environment) {
        Ok(result) => result,
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "insomnia".to_string(),
                timestamp: Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.clone()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);
            return Json(ApiResponse::error(format!("Insomnia import failed: {}", e)));
        }
    };
    // Workspace name: the filename stem (text before the first '.') or a default.
    let workspace_name = filename
        .and_then(|f| f.split('.').next())
        .unwrap_or("Imported Insomnia Collection");
    // NOTE(review): this config is built but never passed on —
    // `create_workspace_from_insomnia` does not take it. Confirm intent.
    let _config = WorkspaceImportConfig {
        create_folders: true,
        base_folder_name: None,
        preserve_hierarchy: true,
        max_depth: 5,
    };
    // Captured before `import_result` is moved into the workspace builder.
    let variables_count = import_result.variables.len();
    match mockforge_core::workspace_import::create_workspace_from_insomnia(
        import_result,
        Some(workspace_name.to_string()),
    ) {
        Ok(workspace_result) => {
            // Persist the new workspace; a save failure still counts as failed.
            if let Err(e) =
                state.workspace_persistence.save_workspace(&workspace_result.workspace).await
            {
                tracing::error!("Failed to save workspace: {}", e);
                return Json(ApiResponse::error(format!(
                    "Import succeeded but failed to save workspace: {}",
                    e
                )));
            }
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "insomnia".to_string(),
                timestamp: Utc::now(),
                routes_count: workspace_result.request_count,
                variables_count,
                warnings_count: workspace_result.warnings.len(),
                success: true,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: None,
            };
            let mut history = state.import_history.write().await;
            history.push(entry);
            Json(ApiResponse::success(format!(
                "Successfully imported {} routes into workspace '{}'",
                workspace_result.request_count, workspace_name
            )))
        }
        Err(e) => {
            let entry = ImportHistoryEntry {
                id: Uuid::new_v4().to_string(),
                format: "insomnia".to_string(),
                timestamp: Utc::now(),
                routes_count: 0,
                variables_count: 0,
                warnings_count: 0,
                success: false,
                filename: filename.map(|s| s.to_string()),
                environment: environment.map(|s| s.to_string()),
                base_url: base_url.map(|s| s.to_string()),
                error_message: Some(e.to_string()),
            };
            let mut history = state.import_history.write().await;
            history.push(entry);
            Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
        }
    }
}
/// OpenAPI import endpoint.
///
/// NOTE(review): this is a stub — it performs no import and unconditionally
/// reports success, which can mislead API clients. It also records nothing
/// in the import history, unlike the Postman/Insomnia/curl importers.
/// Confirm whether it should return a "not implemented" error instead.
pub async fn import_openapi(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    Json(ApiResponse::success("OpenAPI import completed".to_string()))
}
pub async fn import_curl(
State(state): State<AdminState>,
Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
use uuid::Uuid;
let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
let filename = request.get("filename").and_then(|v| v.as_str());
let base_url = request.get("base_url").and_then(|v| v.as_str());
let import_result = match mockforge_core::import::import_curl_commands(content, base_url) {
Ok(result) => result,
Err(e) => {
let entry = ImportHistoryEntry {
id: Uuid::new_v4().to_string(),
format: "curl".to_string(),
timestamp: Utc::now(),
routes_count: 0,
variables_count: 0,
warnings_count: 0,
success: false,
filename: filename.map(|s| s.to_string()),
environment: None,
base_url: base_url.map(|s| s.to_string()),
error_message: Some(e.clone()),
};
let mut history = state.import_history.write().await;
history.push(entry);
return Json(ApiResponse::error(format!("Curl import failed: {}", e)));
}
};
let workspace_name =
filename.and_then(|f| f.split('.').next()).unwrap_or("Imported Curl Commands");
match mockforge_core::workspace_import::create_workspace_from_curl(
import_result,
Some(workspace_name.to_string()),
) {
Ok(workspace_result) => {
if let Err(e) =
state.workspace_persistence.save_workspace(&workspace_result.workspace).await
{
tracing::error!("Failed to save workspace: {}", e);
return Json(ApiResponse::error(format!(
"Import succeeded but failed to save workspace: {}",
e
)));
}
let entry = ImportHistoryEntry {
id: Uuid::new_v4().to_string(),
format: "curl".to_string(),
timestamp: Utc::now(),
routes_count: workspace_result.request_count,
variables_count: 0, warnings_count: workspace_result.warnings.len(),
success: true,
filename: filename.map(|s| s.to_string()),
environment: None,
base_url: base_url.map(|s| s.to_string()),
error_message: None,
};
let mut history = state.import_history.write().await;
history.push(entry);
Json(ApiResponse::success(format!(
"Successfully imported {} routes into workspace '{}'",
workspace_result.request_count, workspace_name
)))
}
Err(e) => {
let entry = ImportHistoryEntry {
id: Uuid::new_v4().to_string(),
format: "curl".to_string(),
timestamp: Utc::now(),
routes_count: 0,
variables_count: 0,
warnings_count: 0,
success: false,
filename: filename.map(|s| s.to_string()),
environment: None,
base_url: base_url.map(|s| s.to_string()),
error_message: Some(e.to_string()),
};
let mut history = state.import_history.write().await;
history.push(entry);
Json(ApiResponse::error(format!("Failed to create workspace: {}", e)))
}
}
}
pub async fn preview_import(
State(_state): State<AdminState>,
Json(request): Json<serde_json::Value>,
) -> Json<ApiResponse<serde_json::Value>> {
use mockforge_core::import::{
import_curl_commands, import_insomnia_export, import_postman_collection,
};
let content = request.get("content").and_then(|v| v.as_str()).unwrap_or("");
let filename = request.get("filename").and_then(|v| v.as_str());
let environment = request.get("environment").and_then(|v| v.as_str());
let base_url = request.get("base_url").and_then(|v| v.as_str());
let format = if let Some(fname) = filename {
if fname.to_lowercase().contains("postman")
|| fname.to_lowercase().ends_with(".postman_collection")
{
"postman"
} else if fname.to_lowercase().contains("insomnia")
|| fname.to_lowercase().ends_with(".insomnia")
{
"insomnia"
} else if fname.to_lowercase().contains("curl")
|| fname.to_lowercase().ends_with(".sh")
|| fname.to_lowercase().ends_with(".curl")
{
"curl"
} else {
"unknown"
}
} else {
"unknown"
};
match format {
"postman" => match import_postman_collection(content, base_url) {
Ok(import_result) => {
let routes: Vec<serde_json::Value> = import_result
.routes
.into_iter()
.map(|route| {
serde_json::json!({
"method": route.method,
"path": route.path,
"headers": route.headers,
"body": route.body,
"status_code": route.response.status,
"response": serde_json::json!({
"status": route.response.status,
"headers": route.response.headers,
"body": route.response.body
})
})
})
.collect();
let response = serde_json::json!({
"routes": routes,
"variables": import_result.variables,
"warnings": import_result.warnings
});
Json(ApiResponse::success(response))
}
Err(e) => Json(ApiResponse::error(format!("Postman import failed: {}", e))),
},
"insomnia" => match import_insomnia_export(content, environment) {
Ok(import_result) => {
let routes: Vec<serde_json::Value> = import_result
.routes
.into_iter()
.map(|route| {
serde_json::json!({
"method": route.method,
"path": route.path,
"headers": route.headers,
"body": route.body,
"status_code": route.response.status,
"response": serde_json::json!({
"status": route.response.status,
"headers": route.response.headers,
"body": route.response.body
})
})
})
.collect();
let response = serde_json::json!({
"routes": routes,
"variables": import_result.variables,
"warnings": import_result.warnings
});
Json(ApiResponse::success(response))
}
Err(e) => Json(ApiResponse::error(format!("Insomnia import failed: {}", e))),
},
"curl" => match import_curl_commands(content, base_url) {
Ok(import_result) => {
let routes: Vec<serde_json::Value> = import_result
.routes
.into_iter()
.map(|route| {
serde_json::json!({
"method": route.method,
"path": route.path,
"headers": route.headers,
"body": route.body,
"status_code": route.response.status,
"response": serde_json::json!({
"status": route.response.status,
"headers": route.response.headers,
"body": route.response.body
})
})
})
.collect();
let response = serde_json::json!({
"routes": routes,
"variables": serde_json::json!({}),
"warnings": import_result.warnings
});
Json(ApiResponse::success(response))
}
Err(e) => Json(ApiResponse::error(format!("Curl import failed: {}", e))),
},
_ => Json(ApiResponse::error("Unsupported import format".to_string())),
}
}
pub async fn get_import_history(
State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
let history = state.import_history.read().await;
let total = history.len();
let imports: Vec<serde_json::Value> = history
.iter()
.rev()
.take(50)
.map(|entry| {
serde_json::json!({
"id": entry.id,
"format": entry.format,
"timestamp": entry.timestamp.to_rfc3339(),
"routes_count": entry.routes_count,
"variables_count": entry.variables_count,
"warnings_count": entry.warnings_count,
"success": entry.success,
"filename": entry.filename,
"environment": entry.environment,
"base_url": entry.base_url,
"error_message": entry.error_message
})
})
.collect();
let response = serde_json::json!({
"imports": imports,
"total": total
});
Json(ApiResponse::success(response))
}
pub async fn get_admin_api_state(
State(_state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
Json(ApiResponse::success(serde_json::json!({
"status": "active"
})))
}
pub async fn get_admin_api_replay(
State(_state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
Json(ApiResponse::success(serde_json::json!({
"replay": []
})))
}
pub async fn get_sse_status(
State(_state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
Json(ApiResponse::success(serde_json::json!({
"available": true,
"endpoint": "/sse",
"config": {
"event_type": "status",
"interval_ms": 1000,
"data_template": "{}"
}
})))
}
pub async fn get_sse_connections(
State(_state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
Json(ApiResponse::success(serde_json::json!({
"active_connections": 0
})))
}
/// List workspaces.
///
/// NOTE(review): placeholder — always returns an empty list; the real
/// implementation may live in the `workspaces` submodule.
pub async fn get_workspaces(
    State(_state): State<AdminState>,
) -> Json<ApiResponse<Vec<serde_json::Value>>> {
    Json(ApiResponse::success(Vec::new()))
}
/// Create a workspace.
///
/// NOTE(review): placeholder — ignores the request body and reports success.
pub async fn create_workspace(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    let message = "Workspace created".to_string();
    Json(ApiResponse::success(message))
}
/// Open a workspace from a directory.
///
/// NOTE(review): placeholder — ignores the request body and reports success.
pub async fn open_workspace_from_directory(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    let message = "Workspace opened from directory".to_string();
    Json(ApiResponse::success(message))
}
/// Returns the current reality level together with the chaos, latency, and
/// MockAI settings derived from the reality engine's active configuration.
pub async fn get_reality_level(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.reality_engine.read().await;
    let level = engine.get_level().await;
    let config = engine.get_config().await;
    // Assemble the sub-objects first, then the envelope.
    let chaos = serde_json::json!({
        "enabled": config.chaos.enabled,
        "error_rate": config.chaos.error_rate,
        "delay_rate": config.chaos.delay_rate,
    });
    let latency = serde_json::json!({
        "base_ms": config.latency.base_ms,
        "jitter_ms": config.latency.jitter_ms,
    });
    let mockai = serde_json::json!({ "enabled": config.mockai.enabled });
    let payload = serde_json::json!({
        "level": level.value(),
        "level_name": level.name(),
        "description": level.description(),
        "chaos": chaos,
        "latency": latency,
        "mockai": mockai,
    });
    Json(ApiResponse::success(payload))
}
/// Request body for `set_reality_level`.
#[derive(Deserialize)]
pub struct SetRealityLevelRequest {
    // Desired reality level; validated against 1..=5 by the handler.
    level: u8,
}
/// Sets the reality level and hot-reloads the derived configuration into the
/// optional subsystems (chaos API, latency injector, MockAI).
///
/// The level change itself always succeeds once validated; subsystem update
/// failures are collected and surfaced as a `warnings` array in an otherwise
/// successful response (they are not rolled back).
pub async fn set_reality_level(
    State(state): State<AdminState>,
    Json(request): Json<SetRealityLevelRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    // Validate the requested level before touching any state.
    let level = match mockforge_core::RealityLevel::from_value(request.level) {
        Some(l) => l,
        None => {
            return Json(ApiResponse::error(format!(
                "Invalid reality level: {}. Must be between 1 and 5.",
                request.level
            )));
        }
    };
    // Apply the level and snapshot the derived config, then release the
    // write lock before fanning out to the subsystems below.
    let engine = state.reality_engine.write().await;
    engine.set_level(level).await;
    let config = engine.get_config().await;
    drop(engine);
    // Per-subsystem failures accumulate here and become response warnings.
    let mut update_errors = Vec::new();
    // --- Chaos subsystem: translate the reality config into chaos-engine types.
    if let Some(ref chaos_api_state) = state.chaos_api_state {
        let mut chaos_config = chaos_api_state.config.write().await;
        use mockforge_chaos::config::{FaultInjectionConfig, LatencyConfig};
        // Latency is only configured when a base delay is set.
        let latency_config = if config.latency.base_ms > 0 {
            Some(LatencyConfig {
                enabled: true,
                fixed_delay_ms: Some(config.latency.base_ms),
                random_delay_range_ms: config
                    .latency
                    .max_ms
                    .map(|max| (config.latency.min_ms, max)),
                // Express jitter as a fraction of the base delay, capped at 1.0.
                // base_ms > 0 is guaranteed by the surrounding `if`, so the
                // division cannot be by zero.
                jitter_percent: if config.latency.jitter_ms > 0 {
                    (config.latency.jitter_ms as f64 / config.latency.base_ms as f64).min(1.0)
                } else {
                    0.0
                },
                probability: 1.0,
            })
        } else {
            None
        };
        // Fault injection mirrors the chaos section of the reality config;
        // features not driven by reality levels are explicitly disabled.
        let fault_injection_config = if config.chaos.enabled {
            Some(FaultInjectionConfig {
                enabled: true,
                http_errors: config.chaos.status_codes.clone(),
                http_error_probability: config.chaos.error_rate,
                connection_errors: false,
                connection_error_probability: 0.0,
                timeout_errors: config.chaos.inject_timeouts,
                timeout_ms: config.chaos.timeout_ms,
                // Timeouts reuse the error rate only when timeouts are enabled.
                timeout_probability: if config.chaos.inject_timeouts {
                    config.chaos.error_rate
                } else {
                    0.0
                },
                partial_responses: false,
                partial_response_probability: 0.0,
                payload_corruption: false,
                payload_corruption_probability: 0.0,
                corruption_type: mockforge_chaos::config::CorruptionType::None,
                error_pattern: Some(mockforge_chaos::config::ErrorPattern::Random {
                    probability: config.chaos.error_rate,
                }),
                mockai_enabled: false,
            })
        } else {
            None
        };
        chaos_config.enabled = config.chaos.enabled;
        chaos_config.latency = latency_config;
        chaos_config.fault_injection = fault_injection_config;
        // Release the chaos-config write lock before logging.
        drop(chaos_config);
        tracing::info!("✅ Updated chaos config for reality level {}", level.value());
    }
    // --- Latency injector: push the new latency profile (best effort).
    if let Some(ref latency_injector) = state.latency_injector {
        match mockforge_core::latency::LatencyInjector::update_profile_async(
            latency_injector,
            config.latency.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated latency injector for reality level {}", level.value());
            }
            Err(e) => {
                let error_msg = format!("Failed to update latency injector: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }
    // --- MockAI: push the new intelligent-behavior config (best effort).
    if let Some(ref mockai) = state.mockai {
        match mockforge_core::intelligent_behavior::MockAI::update_config_async(
            mockai,
            config.mockai.clone(),
        )
        .await
        {
            Ok(_) => {
                tracing::info!("✅ Updated MockAI config for reality level {}", level.value());
            }
            Err(e) => {
                let error_msg = format!("Failed to update MockAI: {}", e);
                tracing::warn!("{}", error_msg);
                update_errors.push(error_msg);
            }
        }
    }
    // Echo the applied level and derived settings back to the caller.
    let mut response = serde_json::json!({
        "level": level.value(),
        "level_name": level.name(),
        "description": level.description(),
        "chaos": {
            "enabled": config.chaos.enabled,
            "error_rate": config.chaos.error_rate,
            "delay_rate": config.chaos.delay_rate,
        },
        "latency": {
            "base_ms": config.latency.base_ms,
            "jitter_ms": config.latency.jitter_ms,
        },
        "mockai": {
            "enabled": config.mockai.enabled,
        },
    });
    // Partial failure is still a success response, annotated with warnings.
    if !update_errors.is_empty() {
        response["warnings"] = serde_json::json!(update_errors);
        tracing::warn!(
            "Reality level updated to {} but some subsystems failed to update: {:?}",
            level.value(),
            update_errors
        );
    } else {
        tracing::info!(
            "✅ Reality level successfully updated to {} (hot-reload applied)",
            level.value()
        );
    }
    Json(ApiResponse::success(response))
}
/// Lists the reality presets known to workspace persistence as JSON summaries
/// (`id` = file name, `name` = file stem, plus the full path).
pub async fn list_reality_presets(
    State(state): State<AdminState>,
) -> Json<ApiResponse<Vec<serde_json::Value>>> {
    match state.workspace_persistence.list_reality_presets().await {
        Err(e) => Json(ApiResponse::error(format!("Failed to list presets: {}", e))),
        Ok(preset_paths) => {
            let mut presets = Vec::with_capacity(preset_paths.len());
            for path in &preset_paths {
                // Fall back to "unknown" for non-UTF-8 or pathological paths.
                let id = path.file_name().and_then(|n| n.to_str()).unwrap_or("unknown");
                let name = path.file_stem().and_then(|n| n.to_str()).unwrap_or("unknown");
                presets.push(serde_json::json!({
                    "id": id,
                    "path": path.to_string_lossy(),
                    "name": name,
                }));
            }
            Json(ApiResponse::success(presets))
        }
    }
}
/// Request body for `import_reality_preset`.
#[derive(Deserialize)]
pub struct ImportPresetRequest {
    // Filesystem path of the preset file to import.
    // NOTE(review): the handler uses this verbatim as a path — confirm the
    // admin API is only reachable by trusted clients.
    path: String,
}
/// Imports a reality preset from a filesystem path and applies it to the
/// reality engine, returning a summary of the applied preset.
///
/// NOTE(review): `request.path` is used verbatim; if this endpoint is exposed
/// to untrusted clients it permits loading arbitrary files as presets —
/// confirm whether path validation is required here.
pub async fn import_reality_preset(
    State(state): State<AdminState>,
    Json(request): Json<ImportPresetRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let preset_path = std::path::Path::new(&request.path);
    match state.workspace_persistence.import_reality_preset(preset_path).await {
        Err(e) => Json(ApiResponse::error(format!("Failed to import preset: {}", e))),
        Ok(preset) => {
            state.reality_engine.write().await.apply_preset(preset.clone()).await;
            let summary = serde_json::json!({
                "name": preset.name,
                "description": preset.description,
                "level": preset.config.level.value(),
                "level_name": preset.config.level.name(),
            });
            Json(ApiResponse::success(summary))
        }
    }
}
/// Request body for `export_reality_preset`.
#[derive(Deserialize)]
pub struct ExportPresetRequest {
    // Human-readable preset name; also used to derive the output filename.
    name: String,
    // Optional free-form description stored in the preset.
    description: Option<String>,
}
/// Exports the current reality-engine configuration as a named preset file.
///
/// The preset is written to the workspace presets directory as
/// `<sanitized-name>.json`. The name is sanitized to a safe filename
/// (lowercased; only ASCII alphanumerics, `-`, and `_` kept, everything else
/// mapped to `_`) so a caller-supplied name cannot escape the presets
/// directory via path separators or `..` components — previously only spaces
/// were replaced, which left `/`, `\` and `.` intact in the joined path.
pub async fn export_reality_preset(
    State(state): State<AdminState>,
    Json(request): Json<ExportPresetRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.reality_engine.read().await;
    let preset = engine.create_preset(request.name.clone(), request.description.clone()).await;
    let persistence = &state.workspace_persistence;
    let presets_dir = persistence.presets_dir();
    // Build a filesystem-safe filename from the user-supplied name.
    let sanitized: String = request
        .name
        .to_lowercase()
        .chars()
        .map(|c| if c.is_ascii_alphanumeric() || c == '-' || c == '_' { c } else { '_' })
        .collect();
    let filename = format!("{}.json", sanitized);
    let output_path = presets_dir.join(&filename);
    match persistence.export_reality_preset(&preset, &output_path).await {
        Ok(_) => Json(ApiResponse::success(serde_json::json!({
            "name": preset.name,
            "description": preset.description,
            "path": output_path.to_string_lossy(),
            "level": preset.config.level.value(),
        }))),
        Err(e) => Json(ApiResponse::error(format!("Failed to export preset: {}", e))),
    }
}
/// Returns the continuum blend ratio for the path given in the `path` query
/// parameter (defaulting to "/"), plus the engine's current configuration.
pub async fn get_continuum_ratio(
    State(state): State<AdminState>,
    Query(params): Query<HashMap<String, String>>,
) -> Json<ApiResponse<serde_json::Value>> {
    let path = match params.get("path") {
        Some(p) => p.clone(),
        None => "/".to_string(),
    };
    let engine = state.continuum_engine.read().await;
    let ratio = engine.get_blend_ratio(&path).await;
    let config = engine.get_config().await;
    let enabled = engine.is_enabled().await;
    let payload = serde_json::json!({
        "path": path,
        "blend_ratio": ratio,
        "enabled": enabled,
        "transition_mode": format!("{:?}", config.transition_mode),
        "merge_strategy": format!("{:?}", config.merge_strategy),
        "default_ratio": config.default_ratio,
    });
    Json(ApiResponse::success(payload))
}
/// Request body for `set_continuum_ratio`.
#[derive(Deserialize)]
pub struct SetContinuumRatioRequest {
    // Route path whose blend ratio is being overridden.
    path: String,
    // Desired blend ratio; clamped to [0.0, 1.0] by the handler.
    ratio: f64,
}
pub async fn set_continuum_ratio(
State(state): State<AdminState>,
Json(request): Json<SetContinuumRatioRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
let ratio = request.ratio.clamp(0.0, 1.0);
let engine = state.continuum_engine.read().await;
engine.set_blend_ratio(&request.path, ratio).await;
Json(ApiResponse::success(serde_json::json!({
"path": request.path,
"blend_ratio": ratio,
})))
}
/// Returns the continuum engine's time schedule as JSON, or JSON `null` when
/// no schedule is configured.
pub async fn get_continuum_schedule(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let engine = state.continuum_engine.read().await;
    let payload = if let Some(s) = engine.get_time_schedule().await {
        serde_json::json!({
            "start_time": s.start_time.to_rfc3339(),
            "end_time": s.end_time.to_rfc3339(),
            "start_ratio": s.start_ratio,
            "end_ratio": s.end_ratio,
            "curve": format!("{:?}", s.curve),
            "duration_days": s.duration().num_days(),
        })
    } else {
        serde_json::json!(null)
    };
    Json(ApiResponse::success(payload))
}
/// Request body for `set_continuum_schedule`.
#[derive(Deserialize)]
pub struct SetContinuumScheduleRequest {
    // RFC 3339 timestamp at which the transition begins.
    start_time: String,
    // RFC 3339 timestamp at which the transition ends.
    end_time: String,
    // Blend ratio at the start of the window; clamped to [0.0, 1.0].
    start_ratio: f64,
    // Blend ratio at the end of the window; clamped to [0.0, 1.0].
    end_ratio: f64,
    // Optional curve name: "linear", "exponential", or "sigmoid" (default linear).
    curve: Option<String>,
}
pub async fn set_continuum_schedule(
State(state): State<AdminState>,
Json(request): Json<SetContinuumScheduleRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
let start_time = chrono::DateTime::parse_from_rfc3339(&request.start_time)
.map_err(|e| format!("Invalid start_time: {}", e))
.map(|dt| dt.with_timezone(&Utc));
let end_time = chrono::DateTime::parse_from_rfc3339(&request.end_time)
.map_err(|e| format!("Invalid end_time: {}", e))
.map(|dt| dt.with_timezone(&Utc));
match (start_time, end_time) {
(Ok(start), Ok(end)) => {
let curve = request
.curve
.as_deref()
.map(|c| match c {
"linear" => mockforge_core::TransitionCurve::Linear,
"exponential" => mockforge_core::TransitionCurve::Exponential,
"sigmoid" => mockforge_core::TransitionCurve::Sigmoid,
_ => mockforge_core::TransitionCurve::Linear,
})
.unwrap_or(mockforge_core::TransitionCurve::Linear);
let schedule = mockforge_core::TimeSchedule::with_curve(
start,
end,
request.start_ratio.clamp(0.0, 1.0),
request.end_ratio.clamp(0.0, 1.0),
curve,
);
let engine = state.continuum_engine.read().await;
engine.set_time_schedule(schedule.clone()).await;
Json(ApiResponse::success(serde_json::json!({
"start_time": schedule.start_time.to_rfc3339(),
"end_time": schedule.end_time.to_rfc3339(),
"start_ratio": schedule.start_ratio,
"end_ratio": schedule.end_ratio,
"curve": format!("{:?}", schedule.curve),
})))
}
(Err(e), _) | (_, Err(e)) => Json(ApiResponse::error(e)),
}
}
/// Request body for `advance_continuum_ratio`.
#[derive(Deserialize)]
pub struct AdvanceContinuumRatioRequest {
    // Amount to advance the default ratio by; the handler defaults to 0.1.
    increment: Option<f64>,
}
/// Advances the continuum engine's default ratio by the requested increment
/// (0.1 when omitted) and reports the resulting default ratio.
pub async fn advance_continuum_ratio(
    State(state): State<AdminState>,
    Json(request): Json<AdvanceContinuumRatioRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    let step = match request.increment {
        Some(value) => value,
        None => 0.1,
    };
    let engine = state.continuum_engine.read().await;
    engine.advance_ratio(step).await;
    let default_ratio = engine.get_config().await.default_ratio;
    let payload = serde_json::json!({
        "default_ratio": default_ratio,
        "increment": step,
    });
    Json(ApiResponse::success(payload))
}
/// Request body for `set_continuum_enabled`.
#[derive(Deserialize)]
pub struct SetContinuumEnabledRequest {
    // Whether the continuum engine should be enabled.
    enabled: bool,
}
/// Toggles the continuum engine on or off and echoes the applied flag.
pub async fn set_continuum_enabled(
    State(state): State<AdminState>,
    Json(request): Json<SetContinuumEnabledRequest>,
) -> Json<ApiResponse<serde_json::Value>> {
    state.continuum_engine.read().await.set_enabled(request.enabled).await;
    let payload = serde_json::json!({ "enabled": request.enabled });
    Json(ApiResponse::success(payload))
}
/// Returns all manual blend-ratio overrides currently held by the continuum engine.
pub async fn get_continuum_overrides(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    let overrides = state.continuum_engine.read().await.get_manual_overrides().await;
    Json(ApiResponse::success(serde_json::json!(overrides)))
}
/// Removes every manual blend-ratio override from the continuum engine.
pub async fn clear_continuum_overrides(
    State(state): State<AdminState>,
) -> Json<ApiResponse<serde_json::Value>> {
    state.continuum_engine.read().await.clear_manual_overrides().await;
    let payload = serde_json::json!({ "message": "All manual overrides cleared" });
    Json(ApiResponse::success(payload))
}
/// Placeholder handler: returns a mock workspace echoing the requested id.
pub async fn get_workspace(
    State(_state): State<AdminState>,
    Path(workspace_id): Path<String>,
) -> Json<ApiResponse<serde_json::Value>> {
    let summary = serde_json::json!({
        "id": workspace_id,
        "name": "Mock Workspace",
        "description": "A mock workspace"
    });
    let payload = serde_json::json!({
        "workspace": {
            "summary": summary,
            "folders": [],
            "requests": []
        }
    });
    Json(ApiResponse::success(payload))
}
/// Placeholder handler: acknowledges workspace deletion without removing anything.
pub async fn delete_workspace(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Workspace deleted");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: acknowledges workspace activation without changing state.
pub async fn set_active_workspace(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Workspace activated");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: acknowledges folder creation without persisting anything.
pub async fn create_folder(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Folder created");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: acknowledges request creation without persisting anything.
pub async fn create_request(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Request created");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: reports a request as executed with an empty response body.
pub async fn execute_workspace_request(
    State(_state): State<AdminState>,
    Path((_workspace_id, _request_id)): Path<(String, String)>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<serde_json::Value>> {
    let payload = serde_json::json!({
        "status": "executed",
        "response": {}
    });
    Json(ApiResponse::success(payload))
}
/// Placeholder handler: returns an empty request history.
pub async fn get_request_history(
    State(_state): State<AdminState>,
    Path((_workspace_id, _request_id)): Path<(String, String)>,
) -> Json<ApiResponse<Vec<serde_json::Value>>> {
    Json(ApiResponse::success(Vec::new()))
}
/// Placeholder handler: returns a mock folder echoing the requested id.
pub async fn get_folder(
    State(_state): State<AdminState>,
    Path((_workspace_id, folder_id)): Path<(String, String)>,
) -> Json<ApiResponse<serde_json::Value>> {
    let summary = serde_json::json!({
        "id": folder_id,
        "name": "Mock Folder",
        "description": "A mock folder"
    });
    let payload = serde_json::json!({
        "folder": {
            "summary": summary,
            "requests": []
        }
    });
    Json(ApiResponse::success(payload))
}
/// Placeholder handler: acknowledges an import into the workspace.
pub async fn import_to_workspace(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Import to workspace completed");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: acknowledges a workspace export without writing anything.
pub async fn export_workspaces(
    State(_state): State<AdminState>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Workspaces exported");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: lists environments; only a static "Global" entry exists.
pub async fn get_environments(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
) -> Json<ApiResponse<serde_json::Value>> {
    let global = serde_json::json!({
        "id": "global",
        "name": "Global",
        "description": "Global environment variables",
        "variable_count": 0,
        "is_global": true,
        "active": true,
        "order": 0
    });
    let payload = serde_json::json!({
        "environments": [global],
        "total": 1
    });
    Json(ApiResponse::success(payload))
}
/// Placeholder handler: acknowledges environment creation.
pub async fn create_environment(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Environment created");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: acknowledges an environment update.
pub async fn update_environment(
    State(_state): State<AdminState>,
    Path((_workspace_id, _environment_id)): Path<(String, String)>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Environment updated");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: acknowledges environment deletion.
pub async fn delete_environment(
    State(_state): State<AdminState>,
    Path((_workspace_id, _environment_id)): Path<(String, String)>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Environment deleted");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: acknowledges environment activation.
pub async fn set_active_environment(
    State(_state): State<AdminState>,
    Path((_workspace_id, _environment_id)): Path<(String, String)>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Environment activated");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: acknowledges a change to the environment ordering.
pub async fn update_environments_order(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Environment order updated");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: returns an empty environment-variable list.
pub async fn get_environment_variables(
    State(_state): State<AdminState>,
    Path((_workspace_id, _environment_id)): Path<(String, String)>,
) -> Json<ApiResponse<serde_json::Value>> {
    let payload = serde_json::json!({ "variables": [] });
    Json(ApiResponse::success(payload))
}
/// Placeholder handler: acknowledges setting an environment variable.
pub async fn set_environment_variable(
    State(_state): State<AdminState>,
    Path((_workspace_id, _environment_id)): Path<(String, String)>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Environment variable set");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: acknowledges removal of an environment variable.
pub async fn remove_environment_variable(
    State(_state): State<AdminState>,
    Path((_workspace_id, _environment_id, _variable_name)): Path<(String, String, String)>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Environment variable removed");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: returns an empty autocomplete result set.
pub async fn get_autocomplete_suggestions(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<serde_json::Value>> {
    let payload = serde_json::json!({
        "suggestions": [],
        "start_position": 0,
        "end_position": 0
    });
    Json(ApiResponse::success(payload))
}
/// Placeholder handler: reports sync as disabled.
pub async fn get_sync_status(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
) -> Json<ApiResponse<serde_json::Value>> {
    let payload = serde_json::json!({ "status": "disabled" });
    Json(ApiResponse::success(payload))
}
/// Placeholder handler: acknowledges a sync configuration request.
pub async fn configure_sync(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Sync configured");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: acknowledges disabling sync.
pub async fn disable_sync(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Sync disabled");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: acknowledges a manual sync trigger.
pub async fn trigger_sync(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Sync triggered");
    Json(ApiResponse::success(ack))
}
/// Placeholder handler: returns an empty list of pending sync changes.
pub async fn get_sync_changes(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
) -> Json<ApiResponse<Vec<serde_json::Value>>> {
    Json(ApiResponse::success(Vec::new()))
}
/// Placeholder handler: acknowledges confirmation of pending sync changes.
pub async fn confirm_sync_changes(
    State(_state): State<AdminState>,
    Path(_workspace_id): Path<String>,
    Json(_request): Json<serde_json::Value>,
) -> Json<ApiResponse<String>> {
    let ack = String::from("Sync changes confirmed");
    Json(ApiResponse::success(ack))
}
/// Empties the shared import-history log held in `AdminState`.
pub async fn clear_import_history(State(state): State<AdminState>) -> Json<ApiResponse<String>> {
    state.import_history.write().await.clear();
    let ack = String::from("Import history cleared");
    Json(ApiResponse::success(ack))
}
#[cfg(test)]
mod tests {
    use super::*;
    // Field-by-field construction of RequestMetrics stores what it is given.
    #[test]
    fn test_request_metrics_creation() {
        use std::collections::HashMap;
        let metrics = RequestMetrics {
            total_requests: 100,
            active_connections: 5,
            requests_by_endpoint: HashMap::new(),
            response_times: vec![10, 20, 30],
            response_times_by_endpoint: HashMap::new(),
            errors_by_endpoint: HashMap::new(),
            last_request_by_endpoint: HashMap::new(),
        };
        assert_eq!(metrics.total_requests, 100);
        assert_eq!(metrics.active_connections, 5);
        assert_eq!(metrics.response_times.len(), 3);
    }
    // SystemMetrics literal round-trips its numeric fields.
    #[test]
    fn test_system_metrics_creation() {
        let metrics = SystemMetrics {
            cpu_usage_percent: 45.5,
            memory_usage_mb: 100,
            active_threads: 10,
        };
        assert_eq!(metrics.active_threads, 10);
        assert!(metrics.cpu_usage_percent > 0.0);
        assert_eq!(metrics.memory_usage_mb, 100);
    }
    // TimeSeriesPoint holds its value; timestamp is not asserted.
    #[test]
    fn test_time_series_point() {
        let point = TimeSeriesPoint {
            timestamp: Utc::now(),
            value: 42.5,
        };
        assert_eq!(point.value, 42.5);
    }
    // RestartStatus literal: in-progress with reason, no outcome yet.
    #[test]
    fn test_restart_status() {
        let status = RestartStatus {
            in_progress: true,
            initiated_at: Some(Utc::now()),
            reason: Some("Manual restart".to_string()),
            success: None,
        };
        assert!(status.in_progress);
        assert!(status.reason.is_some());
    }
    // ConfigurationState can be assembled from default-ish sub-configs with
    // all toggles off.
    #[test]
    fn test_configuration_state() {
        use std::collections::HashMap;
        let state = ConfigurationState {
            latency_profile: LatencyProfile {
                name: "default".to_string(),
                base_ms: 100,
                jitter_ms: 10,
                tag_overrides: HashMap::new(),
            },
            fault_config: FaultConfig {
                enabled: false,
                failure_rate: 0.0,
                status_codes: vec![],
                active_failures: 0,
            },
            proxy_config: ProxyConfig {
                enabled: false,
                upstream_url: None,
                timeout_seconds: 30,
                requests_proxied: 0,
            },
            validation_settings: ValidationSettings {
                mode: "off".to_string(),
                aggregate_errors: false,
                validate_responses: false,
                overrides: HashMap::new(),
            },
            traffic_shaping: TrafficShapingConfig {
                enabled: false,
                bandwidth: crate::models::BandwidthConfig {
                    enabled: false,
                    max_bytes_per_sec: 1_048_576,
                    burst_capacity_bytes: 10_485_760,
                    tag_overrides: HashMap::new(),
                },
                burst_loss: crate::models::BurstLossConfig {
                    enabled: false,
                    burst_probability: 0.1,
                    burst_duration_ms: 5000,
                    loss_rate_during_burst: 0.5,
                    recovery_time_ms: 30000,
                    tag_overrides: HashMap::new(),
                },
            },
        };
        assert_eq!(state.latency_profile.name, "default");
        assert!(!state.fault_config.enabled);
        assert!(!state.proxy_config.enabled);
    }
    // AdminState::new records the HTTP address, API flag, and admin port.
    // NOTE(review): the remaining positional arguments are all None here;
    // their meaning is defined by AdminState::new elsewhere in this file.
    #[test]
    fn test_admin_state_new() {
        let http_addr: std::net::SocketAddr = "127.0.0.1:3000".parse().unwrap();
        let state = AdminState::new(
            Some(http_addr),
            None,
            None,
            None,
            true,
            8080,
            None,
            None,
            None,
            None,
            None,
            None,
            None,
            None,
        );
        assert_eq!(state.http_server_addr, Some(http_addr));
        assert!(state.api_enabled);
        assert_eq!(state.admin_port, 8080);
    }
}