use axum::serve;
use clap::{CommandFactory, Parser, Subcommand};
use clap_complete::{generate, Shell};
use mockforge_chaos::api::create_chaos_api_router;
use mockforge_chaos::config::ChaosConfig;
use mockforge_core::encryption::init_key_store;
use mockforge_core::{
apply_env_overrides, build_file_naming_context, process_generated_file, BarrelGenerator,
GeneratedFile, OpenApiSpec, ServerConfig,
};
use mockforge_data::rag::{EmbeddingProvider, LlmProvider, RagConfig};
use mockforge_observability::prometheus::{prometheus_router, MetricsRegistry};
use serde_json::json;
use std::any::Any;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::net::TcpListener;
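// Subcommand handler modules; protocol-specific handlers (SMTP, MQTT, FTP,
// Kafka, AMQP) are compiled only when their Cargo feature is enabled.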
#[cfg(feature = "amqp")]
mod amqp_commands;
mod backend_generator;
mod blueprint_commands;
mod client_generator;
mod cloud_commands;
mod contract_diff_commands;
mod contract_sync_commands;
mod deploy_commands;
mod dev_setup_commands;
mod error_helpers;
mod flow_commands;
#[cfg(feature = "ftp")]
mod ftp_commands;
mod git_watch_commands;
mod import_commands;
#[cfg(feature = "kafka")]
mod kafka_commands;
mod mockai_commands;
#[cfg(feature = "mqtt")]
mod mqtt_commands;
mod plugin_commands;
mod progress;
mod recorder_commands;
mod scenario_commands;
#[cfg(feature = "smtp")]
mod smtp_commands;
mod snapshot_commands;
mod template_commands;
mod time_commands;
mod tunnel_commands;
mod vbr_commands;
mod voice_commands;
mod wizard;
mod workspace_commands;
#[cfg(test)]
mod tests;
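/// Top-level CLI definition, parsed with clap's derive API.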
#[derive(Parser)]
#[command(name = "mockforge")]
#[command(about = "MockForge - Comprehensive API Mocking Framework")]
#[command(version = env!("CARGO_PKG_VERSION"))]
struct Cli {
#[arg(short = 'v', long, global = true, default_value = "info")]
log_level: String,
#[command(subcommand)]
command: Commands,
}
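/// All first-level subcommands. Each variant owns the full flag set for one
/// command, hence the `large_enum_variant` allow.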
#[derive(Subcommand)]
#[allow(clippy::large_enum_variant)]
enum Commands {
#[command(verbatim_doc_comment)]
Serve {
#[arg(short, long)]
config: Option<PathBuf>,
#[arg(short, long)]
profile: Option<String>,
#[arg(long, help_heading = "Server Ports")]
http_port: Option<u16>,
#[arg(long, help_heading = "Server Ports")]
ws_port: Option<u16>,
#[arg(long, help_heading = "Server Ports")]
grpc_port: Option<u16>,
#[arg(long, help_heading = "Server Ports")]
smtp_port: Option<u16>,
#[arg(long, help_heading = "Server Ports")]
mqtt_port: Option<u16>,
#[arg(long, help_heading = "Server Ports")]
kafka_port: Option<u16>,
#[arg(long, help_heading = "Server Ports")]
amqp_port: Option<u16>,
#[arg(long, help_heading = "Server Ports")]
tcp_port: Option<u16>,
#[arg(long, help_heading = "Admin & UI")]
admin: bool,
#[arg(long, help_heading = "Admin & UI")]
admin_port: Option<u16>,
#[arg(long, help_heading = "Observability & Metrics")]
metrics: bool,
#[arg(long, help_heading = "Observability & Metrics")]
metrics_port: Option<u16>,
#[arg(long, help_heading = "Tracing")]
tracing: bool,
#[arg(long, default_value = "mockforge", help_heading = "Tracing")]
tracing_service_name: String,
#[arg(long, default_value = "development", help_heading = "Tracing")]
tracing_environment: String,
#[arg(
long,
default_value = "http://localhost:14268/api/traces",
help_heading = "Tracing"
)]
jaeger_endpoint: String,
#[arg(long, default_value = "1.0", help_heading = "Tracing")]
tracing_sampling_rate: f64,
#[arg(long, help_heading = "API Flight Recorder")]
recorder: bool,
#[arg(
long,
default_value = "./mockforge-recordings.db",
help_heading = "API Flight Recorder"
)]
recorder_db: String,
#[arg(long, help_heading = "API Flight Recorder")]
recorder_no_api: bool,
#[arg(long, help_heading = "API Flight Recorder")]
recorder_api_port: Option<u16>,
#[arg(long, default_value = "10000", help_heading = "API Flight Recorder")]
recorder_max_requests: i64,
#[arg(long, default_value = "7", help_heading = "API Flight Recorder")]
recorder_retention_days: i64,
#[arg(long, help_heading = "Chaos Engineering")]
chaos: bool,
#[arg(long, help_heading = "Chaos Engineering")]
chaos_scenario: Option<String>,
#[arg(long, help_heading = "Chaos Engineering")]
chaos_latency_ms: Option<u64>,
#[arg(long, help_heading = "Chaos Engineering")]
chaos_latency_range: Option<String>,
#[arg(long, default_value = "1.0", help_heading = "Chaos Engineering")]
chaos_latency_probability: f64,
#[arg(long, help_heading = "Chaos Engineering")]
chaos_http_errors: Option<String>,
#[arg(long, default_value = "0.1", help_heading = "Chaos Engineering")]
chaos_http_error_probability: f64,
#[arg(long, help_heading = "Chaos Engineering")]
chaos_rate_limit: Option<u32>,
#[arg(long, help_heading = "Chaos Engineering")]
chaos_bandwidth_limit: Option<u64>,
#[arg(long, help_heading = "Chaos Engineering")]
chaos_packet_loss: Option<f64>,
#[arg(long, help_heading = "Chaos Engineering - gRPC")]
chaos_grpc: bool,
#[arg(long, help_heading = "Chaos Engineering - gRPC")]
chaos_grpc_status_codes: Option<String>,
#[arg(long, default_value = "0.1", help_heading = "Chaos Engineering - gRPC")]
chaos_grpc_stream_interruption_probability: f64,
#[arg(long, help_heading = "Chaos Engineering - WebSocket")]
chaos_websocket: bool,
#[arg(long, help_heading = "Chaos Engineering - WebSocket")]
chaos_websocket_close_codes: Option<String>,
#[arg(
long,
default_value = "0.05",
help_heading = "Chaos Engineering - WebSocket"
)]
chaos_websocket_message_drop_probability: f64,
#[arg(
long,
default_value = "0.05",
help_heading = "Chaos Engineering - WebSocket"
)]
chaos_websocket_message_corruption_probability: f64,
#[arg(long, help_heading = "Chaos Engineering - GraphQL")]
chaos_graphql: bool,
#[arg(long, help_heading = "Chaos Engineering - GraphQL")]
chaos_graphql_error_codes: Option<String>,
#[arg(
long,
default_value = "0.1",
help_heading = "Chaos Engineering - GraphQL"
)]
chaos_graphql_partial_data_probability: f64,
#[arg(long, help_heading = "Chaos Engineering - GraphQL")]
chaos_graphql_resolver_latency: bool,
#[arg(long, help_heading = "Resilience Patterns")]
circuit_breaker: bool,
#[arg(long, default_value = "5", help_heading = "Resilience Patterns")]
circuit_breaker_failure_threshold: u64,
#[arg(long, default_value = "2", help_heading = "Resilience Patterns")]
circuit_breaker_success_threshold: u64,
#[arg(long, default_value = "60000", help_heading = "Resilience Patterns")]
circuit_breaker_timeout_ms: u64,
#[arg(long, default_value = "50.0", help_heading = "Resilience Patterns")]
circuit_breaker_failure_rate: f64,
#[arg(long, help_heading = "Resilience Patterns")]
bulkhead: bool,
#[arg(long, default_value = "100", help_heading = "Resilience Patterns")]
bulkhead_max_concurrent: u32,
#[arg(long, default_value = "10", help_heading = "Resilience Patterns")]
bulkhead_max_queue: u32,
#[arg(long, default_value = "5000", help_heading = "Resilience Patterns")]
bulkhead_queue_timeout_ms: u64,
#[arg(short, long, help_heading = "Server Configuration")]
spec: Option<PathBuf>,
#[arg(long, help_heading = "Server Configuration")]
ws_replay_file: Option<PathBuf>,
#[arg(long, help_heading = "Server Configuration")]
graphql: Option<PathBuf>,
#[arg(long, help_heading = "Server Ports")]
graphql_port: Option<u16>,
#[arg(long, help_heading = "Server Configuration")]
graphql_upstream: Option<String>,
#[arg(long, help_heading = "Traffic Shaping")]
traffic_shaping: bool,
#[arg(long, default_value = "1000000", help_heading = "Traffic Shaping")]
bandwidth_limit: u64,
#[arg(long, default_value = "10000", help_heading = "Traffic Shaping")]
burst_size: u64,
#[arg(long, help_heading = "Network Profiles")]
network_profile: Option<String>,
#[arg(long, help_heading = "Network Profiles")]
list_network_profiles: bool,
#[arg(long, help_heading = "Chaos Engineering - Random")]
chaos_random: bool,
#[arg(
long,
default_value = "0.1",
help_heading = "Chaos Engineering - Random"
)]
chaos_random_error_rate: f64,
#[arg(
long,
default_value = "0.3",
help_heading = "Chaos Engineering - Random"
)]
chaos_random_delay_rate: f64,
#[arg(
long,
default_value = "100",
help_heading = "Chaos Engineering - Random"
)]
chaos_random_min_delay: u64,
#[arg(
long,
default_value = "2000",
help_heading = "Chaos Engineering - Random"
)]
chaos_random_max_delay: u64,
#[arg(long, help_heading = "Chaos Engineering - Profiles")]
chaos_profile: Option<String>,
#[arg(long, help_heading = "AI Features")]
ai_enabled: bool,
#[arg(long, help_heading = "Reality Slider")]
reality_level: Option<u8>,
#[arg(long, help_heading = "AI Features")]
rag_provider: Option<String>,
#[arg(long, help_heading = "AI Features")]
rag_model: Option<String>,
#[arg(long, help_heading = "AI Features")]
rag_api_key: Option<String>,
#[arg(long, help_heading = "Validation")]
dry_run: bool,
#[arg(long, help_heading = "Validation")]
progress: bool,
#[arg(long, help_heading = "Validation")]
verbose: bool,
},
#[cfg(feature = "smtp")]
#[command(verbatim_doc_comment)]
Smtp {
#[command(subcommand)]
smtp_command: SmtpCommands,
},
#[cfg(feature = "mqtt")]
#[command(verbatim_doc_comment)]
Mqtt {
#[command(subcommand)]
mqtt_command: MqttCommands,
},
#[cfg(feature = "ftp")]
#[command(verbatim_doc_comment)]
Ftp {
#[command(subcommand)]
ftp_command: ftp_commands::FtpCommands,
},
#[cfg(feature = "kafka")]
#[command(verbatim_doc_comment)]
Kafka {
#[command(subcommand)]
kafka_command: kafka_commands::KafkaCommands,
},
#[cfg(feature = "amqp")]
#[command(verbatim_doc_comment)]
Amqp {
#[command(subcommand)]
amqp_command: amqp_commands::AmqpCommands,
},
Data {
#[command(subcommand)]
data_command: DataCommands,
},
Admin {
#[arg(short, long, default_value = "9080")]
port: u16,
#[arg(short, long)]
config: Option<PathBuf>,
},
#[command(verbatim_doc_comment)]
Sync {
#[arg(short, long)]
workspace_dir: PathBuf,
#[arg(short, long)]
config: Option<PathBuf>,
},
#[command(verbatim_doc_comment)]
Quick {
file: PathBuf,
#[arg(short, long, default_value = "3000")]
port: u16,
#[arg(long)]
admin: bool,
#[arg(long, default_value = "9080")]
admin_port: u16,
#[arg(long)]
metrics: bool,
#[arg(long, default_value = "9090")]
metrics_port: u16,
#[arg(long)]
logging: bool,
#[arg(long, default_value = "127.0.0.1")]
host: String,
},
Completions {
#[arg(value_enum)]
shell: Shell,
},
Init {
#[arg(default_value = ".")]
name: String,
#[arg(long)]
no_examples: bool,
},
Wizard,
#[command(verbatim_doc_comment)]
Generate {
#[arg(short, long)]
config: Option<PathBuf>,
#[arg(short, long)]
spec: Option<PathBuf>,
#[arg(short, long)]
output: Option<PathBuf>,
#[arg(long)]
verbose: bool,
#[arg(long)]
dry_run: bool,
#[arg(long)]
watch: bool,
#[arg(long, default_value = "500")]
watch_debounce: u64,
#[arg(long)]
progress: bool,
},
#[command(verbatim_doc_comment)]
Schema {
#[arg(short, long)]
output: Option<PathBuf>,
},
#[command(verbatim_doc_comment)]
DevSetup {
#[command(flatten)]
args: dev_setup_commands::DevSetupArgs,
},
Config {
#[command(subcommand)]
config_command: ConfigCommands,
},
#[command(verbatim_doc_comment)]
GitWatch {
#[arg(value_name = "REPOSITORY_URL")]
repository_url: String,
#[arg(short, long, default_value = "main")]
branch: Option<String>,
#[arg(short, long, value_name = "PATH")]
spec_paths: Vec<String>,
#[arg(long, default_value = "60")]
poll_interval: Option<u64>,
#[arg(long, value_name = "TOKEN")]
auth_token: Option<String>,
#[arg(long, value_name = "DIR")]
cache_dir: Option<PathBuf>,
#[arg(long, value_name = "COMMAND")]
reload_command: Option<String>,
},
#[command(verbatim_doc_comment)]
ContractSync {
#[arg(value_name = "REPOSITORY_URL")]
repository_url: String,
#[arg(short, long, default_value = "main")]
branch: Option<String>,
#[arg(short, long, value_name = "PATH")]
spec_paths: Vec<String>,
#[arg(long, value_name = "FILE")]
mock_config: Option<PathBuf>,
#[arg(long, value_name = "TOKEN")]
auth_token: Option<String>,
#[arg(long, value_name = "DIR")]
cache_dir: Option<PathBuf>,
#[arg(long)]
strict: bool,
#[arg(short, long, value_name = "FILE")]
output: Option<PathBuf>,
#[arg(long)]
update: bool,
},
#[command(verbatim_doc_comment)]
ContractDiff {
#[command(subcommand)]
diff_command: ContractDiffCommands,
},
#[command(verbatim_doc_comment)]
Import {
#[command(subcommand)]
import_command: import_commands::ImportCommands,
},
TestAi {
#[command(subcommand)]
ai_command: AiTestCommands,
},
Plugin {
#[command(subcommand)]
plugin_command: plugin_commands::PluginCommands,
},
Recorder {
#[command(subcommand)]
recorder_command: recorder_commands::RecorderCommands,
},
Flow {
#[command(subcommand)]
flow_command: flow_commands::FlowCommands,
},
#[command(verbatim_doc_comment)]
Scenario {
#[command(subcommand)]
scenario_command: scenario_commands::ScenarioCommands,
},
#[command(verbatim_doc_comment)]
Template {
#[command(subcommand)]
template_command: template_commands::TemplateCommands,
},
#[command(verbatim_doc_comment)]
Blueprint {
#[command(subcommand)]
blueprint_command: blueprint_commands::BlueprintCommands,
},
#[command(verbatim_doc_comment)]
Client {
#[command(subcommand)]
client_command: client_generator::ClientCommand,
},
#[command(verbatim_doc_comment)]
Backend {
#[command(subcommand)]
backend_command: backend_generator::BackendCommand,
},
#[command(verbatim_doc_comment)]
Workspace {
#[command(subcommand)]
workspace_command: workspace_commands::WorkspaceCommands,
},
#[command(verbatim_doc_comment)]
Cloud {
#[command(subcommand)]
cloud_command: cloud_commands::CloudCommands,
},
#[command(verbatim_doc_comment)]
Tunnel {
#[command(subcommand)]
tunnel_command: tunnel_commands::TunnelSubcommand,
},
#[command(verbatim_doc_comment)]
Deploy {
#[command(subcommand)]
deploy_command: deploy_commands::DeploySubcommand,
},
#[command(verbatim_doc_comment)]
Vbr {
#[command(subcommand)]
vbr_command: vbr_commands::VbrCommands,
},
#[command(verbatim_doc_comment)]
Mockai {
#[command(subcommand)]
mockai_command: mockai_commands::MockAICommands,
},
#[command(verbatim_doc_comment)]
Snapshot {
#[command(subcommand)]
snapshot_command: snapshot_commands::SnapshotCommands,
},
#[command(verbatim_doc_comment)]
Voice {
#[command(subcommand)]
voice_command: voice_commands::VoiceCommands,
},
#[command(verbatim_doc_comment)]
Time {
#[command(subcommand)]
time_command: time_commands::TimeCommands,
#[arg(long)]
admin_url: Option<String>,
},
#[command(verbatim_doc_comment)]
Chaos {
#[command(subcommand)]
chaos_command: ChaosCommands,
},
Orchestrate {
#[command(subcommand)]
orchestrate_command: OrchestrateCommands,
},
#[command(verbatim_doc_comment)]
GenerateTests {
#[arg(short, long, default_value = "./mockforge-recordings.db")]
database: PathBuf,
#[arg(short, long, default_value = "rust_reqwest")]
format: String,
#[arg(short, long)]
output: Option<PathBuf>,
#[arg(long)]
protocol: Option<String>,
#[arg(long)]
method: Option<String>,
#[arg(long)]
path: Option<String>,
#[arg(long)]
status_code: Option<u16>,
#[arg(short, long, default_value = "50")]
limit: usize,
#[arg(long, default_value = "generated_tests")]
suite_name: String,
#[arg(long, default_value = "http://localhost:3000")]
base_url: String,
#[arg(long)]
ai_descriptions: bool,
#[arg(long, default_value = "ollama")]
llm_provider: String,
#[arg(long, default_value = "llama2")]
llm_model: String,
#[arg(long)]
llm_endpoint: Option<String>,
#[arg(long)]
llm_api_key: Option<String>,
#[arg(long, default_value = "true")]
validate_body: bool,
#[arg(long, default_value = "true")]
validate_status: bool,
#[arg(long)]
validate_headers: bool,
#[arg(long)]
validate_timing: bool,
#[arg(long)]
max_duration_ms: Option<u64>,
},
#[command(verbatim_doc_comment)]
Suggest {
#[arg(short, long, conflicts_with = "from_description")]
from: Option<PathBuf>,
#[arg(long, conflicts_with = "from")]
from_description: Option<String>,
#[arg(long, default_value = "openapi")]
format: String,
#[arg(short, long)]
output: Option<PathBuf>,
#[arg(long, default_value = "5")]
num_suggestions: usize,
#[arg(long, default_value = "true")]
include_examples: bool,
#[arg(long)]
domain: Option<String>,
#[arg(long, default_value = "openai")]
llm_provider: String,
#[arg(long)]
llm_model: Option<String>,
#[arg(long)]
llm_endpoint: Option<String>,
#[arg(long)]
llm_api_key: Option<String>,
#[arg(long, default_value = "0.7")]
temperature: f64,
#[arg(long)]
print_json: bool,
},
#[command(verbatim_doc_comment)]
Bench {
#[arg(short, long)]
spec: PathBuf,
#[arg(short, long)]
target: String,
#[arg(short, long, default_value = "1m")]
duration: String,
#[arg(long, default_value = "10")]
vus: u32,
#[arg(long, default_value = "ramp-up")]
scenario: String,
#[arg(long)]
operations: Option<String>,
#[arg(long)]
auth: Option<String>,
#[arg(long)]
headers: Option<String>,
#[arg(short, long, default_value = "bench-results")]
output: PathBuf,
#[arg(long)]
generate_only: bool,
#[arg(long)]
script_output: Option<PathBuf>,
#[arg(long, default_value = "p95")]
threshold_percentile: String,
#[arg(long, default_value = "500")]
threshold_ms: u64,
#[arg(long, default_value = "0.05")]
max_error_rate: f64,
#[arg(short = 'V', long)]
verbose: bool,
},
}
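/// Subcommands for `mockforge orchestrate`: start, inspect, and stop
/// orchestrations on a running server, plus validate and scaffold
/// orchestration files.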
#[derive(Subcommand)]
enum OrchestrateCommands {
#[command(verbatim_doc_comment)]
Start {
#[arg(short, long)]
file: PathBuf,
#[arg(long, default_value = "http://localhost:3000")]
base_url: String,
},
Status {
#[arg(long, default_value = "http://localhost:3000")]
base_url: String,
},
Stop {
#[arg(long, default_value = "http://localhost:3000")]
base_url: String,
},
Validate {
#[arg(short, long)]
file: PathBuf,
},
#[command(verbatim_doc_comment)]
Template {
#[arg(short, long)]
output: PathBuf,
#[arg(long, default_value = "yaml")]
format: String,
},
}
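/// Subcommands for `mockforge test-ai`: exercise AI-backed mock generation,
/// data drift, and narrative event streams.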
#[derive(Subcommand)]
enum AiTestCommands {
#[command(verbatim_doc_comment)]
IntelligentMock {
#[arg(short, long)]
prompt: String,
#[arg(long)]
rag_provider: Option<String>,
#[arg(long)]
rag_model: Option<String>,
#[arg(short, long)]
output: Option<PathBuf>,
},
Drift {
#[arg(short, long)]
initial_data: PathBuf,
#[arg(short = 'n', long, default_value = "5")]
iterations: usize,
#[arg(short, long)]
output: Option<PathBuf>,
},
#[command(verbatim_doc_comment)]
EventStream {
#[arg(short, long)]
narrative: String,
#[arg(short = 'c', long, default_value = "10")]
event_count: usize,
#[arg(long)]
rag_provider: Option<String>,
#[arg(long)]
rag_model: Option<String>,
#[arg(short, long)]
output: Option<PathBuf>,
},
}
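/// Subcommands for `mockforge config` (currently just validation).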
#[derive(Subcommand)]
enum ConfigCommands {
Validate {
#[arg(short, long)]
config: Option<PathBuf>,
},
}
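/// Subcommands for `mockforge contract-diff`: analyze captured traffic against
/// a spec, compare two specs, and generate or apply spec patches.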
#[derive(Subcommand)]
enum ContractDiffCommands {
#[command(verbatim_doc_comment)]
Analyze {
#[arg(short, long)]
spec: PathBuf,
#[arg(long, conflicts_with = "capture_id")]
request_path: Option<PathBuf>,
#[arg(long, conflicts_with = "request_path")]
capture_id: Option<String>,
#[arg(short, long)]
output: Option<PathBuf>,
#[arg(long)]
llm_provider: Option<String>,
#[arg(long)]
llm_model: Option<String>,
#[arg(long)]
llm_api_key: Option<String>,
#[arg(long)]
confidence_threshold: Option<f64>,
},
#[command(verbatim_doc_comment)]
Compare {
#[arg(long)]
old_spec: PathBuf,
#[arg(long)]
new_spec: PathBuf,
#[arg(short, long)]
output: Option<PathBuf>,
},
#[command(verbatim_doc_comment)]
GeneratePatch {
#[arg(short, long)]
spec: PathBuf,
#[arg(long, conflicts_with = "capture_id")]
request_path: Option<PathBuf>,
#[arg(long, conflicts_with = "request_path")]
capture_id: Option<String>,
#[arg(short, long)]
output: PathBuf,
#[arg(long)]
llm_provider: Option<String>,
#[arg(long)]
llm_model: Option<String>,
#[arg(long)]
llm_api_key: Option<String>,
},
#[command(verbatim_doc_comment)]
ApplyPatch {
#[arg(short, long)]
spec: PathBuf,
#[arg(short, long)]
patch: PathBuf,
#[arg(short, long)]
output: Option<PathBuf>,
},
}
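/// Subcommands for `mockforge smtp` (mailbox inspection, fixtures, and test sends).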
#[cfg(feature = "smtp")]
#[derive(Subcommand)]
enum SmtpCommands {
Mailbox {
#[command(subcommand)]
mailbox_command: MailboxCommands,
},
Fixtures {
#[command(subcommand)]
fixtures_command: FixturesCommands,
},
Send {
#[arg(short, long)]
to: String,
#[arg(short, long)]
subject: String,
#[arg(short, long, default_value = "Test email from MockForge CLI")]
body: String,
#[arg(long, default_value = "localhost")]
host: String,
#[arg(long, default_value = "1025")]
port: u16,
#[arg(long, default_value = "test@mockforge.cli")]
from: String,
},
}
#[cfg(feature = "smtp")]
#[derive(Subcommand)]
enum MailboxCommands {
List,
Show {
email_id: String,
},
Clear,
Export {
#[arg(short, long, default_value = "mbox")]
format: String,
#[arg(short, long)]
output: PathBuf,
},
Search {
#[arg(long)]
sender: Option<String>,
#[arg(long)]
recipient: Option<String>,
#[arg(long)]
subject: Option<String>,
#[arg(long)]
body: Option<String>,
#[arg(long)]
since: Option<String>,
#[arg(long)]
until: Option<String>,
#[arg(long)]
regex: bool,
#[arg(long)]
case_sensitive: bool,
},
}
#[cfg(feature = "smtp")]
#[derive(Subcommand)]
enum FixturesCommands {
List,
Reload,
Validate {
file: PathBuf,
},
}
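/// Subcommands for `mockforge mqtt` (publish, subscribe, and broker inspection).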
#[cfg(feature = "mqtt")]
#[derive(Subcommand)]
enum MqttCommands {
Publish {
#[arg(long, default_value = "localhost")]
host: String,
#[arg(long, default_value = "1883")]
port: u16,
#[arg(short, long)]
topic: String,
#[arg(short, long)]
payload: String,
#[arg(short, long, default_value = "0")]
qos: u8,
#[arg(long)]
retain: bool,
},
Subscribe {
#[arg(long, default_value = "localhost")]
host: String,
#[arg(long, default_value = "1883")]
port: u16,
#[arg(short, long)]
topic: String,
#[arg(short, long, default_value = "0")]
qos: u8,
},
Topics {
#[command(subcommand)]
topics_command: MqttTopicsCommands,
},
Fixtures {
#[command(subcommand)]
fixtures_command: MqttFixturesCommands,
},
Clients {
#[command(subcommand)]
clients_command: MqttClientsCommands,
},
}
#[cfg(feature = "mqtt")]
#[derive(Subcommand)]
enum MqttTopicsCommands {
List,
ClearRetained,
}
#[cfg(feature = "mqtt")]
#[derive(Subcommand)]
enum MqttFixturesCommands {
Load {
path: PathBuf,
},
StartAutoPublish,
StopAutoPublish,
}
#[cfg(feature = "mqtt")]
#[derive(Subcommand)]
enum MqttClientsCommands {
List,
Disconnect {
client_id: String,
},
}
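/// Subcommands for `mockforge data`: synthetic data from templates, JSON
/// schemas, or OpenAPI specs, plus a standalone mock server.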
#[derive(Subcommand)]
enum DataCommands {
#[command(verbatim_doc_comment)]
Template {
template: String,
#[arg(short, long, default_value = "10")]
rows: usize,
#[arg(short, long, default_value = "json")]
format: String,
#[arg(short, long)]
output: Option<PathBuf>,
#[arg(long)]
rag: bool,
#[arg(long)]
rag_provider: Option<String>,
#[arg(long)]
rag_model: Option<String>,
#[arg(long)]
rag_endpoint: Option<String>,
#[arg(long)]
rag_timeout: Option<u64>,
#[arg(long)]
rag_max_retries: Option<usize>,
},
#[command(verbatim_doc_comment)]
Schema {
schema: PathBuf,
#[arg(short, long, default_value = "10")]
rows: usize,
#[arg(short, long, default_value = "json")]
format: String,
#[arg(short, long)]
output: Option<PathBuf>,
},
#[command(verbatim_doc_comment)]
MockOpenapi {
spec: PathBuf,
#[arg(short, long, default_value = "5")]
rows: usize,
#[arg(short, long, default_value = "json")]
format: String,
#[arg(short, long)]
output: Option<PathBuf>,
#[arg(long)]
realistic: bool,
#[arg(long)]
include_optional: bool,
#[arg(long)]
validate: bool,
#[arg(long, default_value = "3")]
array_size: usize,
#[arg(long, default_value = "10")]
max_array_size: usize,
},
#[command(verbatim_doc_comment)]
MockServer {
spec: PathBuf,
#[arg(short, long, default_value = "3000")]
port: u16,
#[arg(long, default_value = "127.0.0.1")]
host: String,
#[arg(long)]
cors: bool,
#[arg(long)]
log_requests: bool,
#[arg(long)]
delay: Vec<String>,
#[arg(long)]
realistic: bool,
#[arg(long)]
include_optional: bool,
#[arg(long)]
validate: bool,
},
}
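/// Subcommands for `mockforge chaos`: manage chaos profiles (apply, export,
/// import, list) on a running server.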
#[derive(Subcommand)]
enum ChaosCommands {
Profile {
#[command(subcommand)]
profile_command: ProfileCommands,
},
}
#[derive(Subcommand)]
enum ProfileCommands {
Apply {
name: String,
#[arg(long, default_value = "http://localhost:3000")]
base_url: String,
},
Export {
name: String,
#[arg(long, default_value = "json")]
format: String,
#[arg(short, long)]
output: Option<PathBuf>,
#[arg(long, default_value = "http://localhost:3000")]
base_url: String,
},
Import {
#[arg(short, long)]
file: PathBuf,
#[arg(long, default_value = "http://localhost:3000")]
base_url: String,
},
List {
#[arg(long, default_value = "http://localhost:3000")]
base_url: String,
},
}
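/// Entry point: parse arguments, initialize logging from the global
/// `--log-level` flag, then dispatch to the matching subcommand handler.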
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let cli = Cli::parse();
let initial_logging_config = mockforge_observability::LoggingConfig {
level: cli.log_level.clone(),
        json_format: false,
        file_path: None,
max_file_size_mb: 10,
max_files: 5,
};
if let Err(e) = mockforge_observability::init_logging(initial_logging_config) {
eprintln!("Failed to initialize logging: {}", e);
std::process::exit(1);
}
match cli.command {
Commands::Serve {
config,
profile,
http_port,
ws_port,
grpc_port,
smtp_port: _smtp_port,
mqtt_port: _mqtt_port,
kafka_port: _kafka_port,
amqp_port: _amqp_port,
tcp_port,
admin,
admin_port,
metrics,
metrics_port,
tracing,
tracing_service_name,
tracing_environment,
jaeger_endpoint,
tracing_sampling_rate,
recorder,
recorder_db,
recorder_no_api,
recorder_api_port,
recorder_max_requests,
recorder_retention_days,
chaos,
chaos_scenario,
chaos_latency_ms,
chaos_latency_range,
chaos_latency_probability,
chaos_http_errors,
chaos_http_error_probability,
chaos_rate_limit,
chaos_bandwidth_limit,
chaos_packet_loss,
chaos_grpc: _,
chaos_grpc_status_codes: _,
chaos_grpc_stream_interruption_probability: _,
chaos_websocket: _,
chaos_websocket_close_codes: _,
chaos_websocket_message_drop_probability: _,
chaos_websocket_message_corruption_probability: _,
chaos_graphql: _,
chaos_graphql_error_codes: _,
chaos_graphql_partial_data_probability: _,
chaos_graphql_resolver_latency: _,
circuit_breaker: _,
circuit_breaker_failure_threshold: _,
circuit_breaker_success_threshold: _,
circuit_breaker_timeout_ms: _,
circuit_breaker_failure_rate: _,
bulkhead: _,
bulkhead_max_concurrent: _,
bulkhead_max_queue: _,
bulkhead_queue_timeout_ms: _,
spec,
ws_replay_file,
graphql,
graphql_port,
graphql_upstream,
traffic_shaping,
bandwidth_limit,
burst_size,
network_profile,
list_network_profiles,
chaos_random,
chaos_random_error_rate,
chaos_random_delay_rate,
chaos_random_min_delay,
chaos_random_max_delay,
chaos_profile,
ai_enabled,
reality_level,
rag_provider,
rag_model,
rag_api_key,
dry_run,
progress,
verbose,
} => {
if list_network_profiles {
let catalog = mockforge_core::NetworkProfileCatalog::new();
println!("\n📡 Available Network Profiles:\n");
for (name, description) in catalog.list_profiles_with_description() {
println!(" • {:<20} {}", name, description);
}
println!();
return Ok(());
}
handle_serve(
config,
profile,
http_port,
ws_port,
grpc_port,
_smtp_port,
tcp_port,
admin,
admin_port,
metrics,
metrics_port,
tracing,
tracing_service_name,
tracing_environment,
jaeger_endpoint,
tracing_sampling_rate,
recorder,
recorder_db,
recorder_no_api,
recorder_api_port,
recorder_max_requests,
recorder_retention_days,
chaos,
chaos_scenario,
chaos_latency_ms,
chaos_latency_range,
chaos_latency_probability,
chaos_http_errors,
chaos_http_error_probability,
chaos_rate_limit,
chaos_bandwidth_limit,
chaos_packet_loss,
spec,
ws_replay_file,
graphql,
graphql_port,
graphql_upstream,
traffic_shaping,
bandwidth_limit,
burst_size,
network_profile,
chaos_random,
chaos_random_error_rate,
chaos_random_delay_rate,
chaos_random_min_delay,
chaos_random_max_delay,
chaos_profile,
ai_enabled,
reality_level,
rag_provider,
rag_model,
rag_api_key,
dry_run,
progress,
verbose,
)
.await?;
}
#[cfg(feature = "smtp")]
Commands::Smtp { smtp_command } => {
smtp_commands::handle_smtp_command(smtp_command).await?;
}
#[cfg(feature = "mqtt")]
Commands::Mqtt { mqtt_command } => {
mqtt_commands::handle_mqtt_command(mqtt_command).await?;
}
#[cfg(feature = "ftp")]
Commands::Ftp { ftp_command } => {
ftp_commands::handle_ftp_command(ftp_command).await?;
}
#[cfg(feature = "kafka")]
Commands::Kafka { kafka_command } => {
kafka_commands::handle_kafka_command(kafka_command).await?;
}
#[cfg(feature = "amqp")]
Commands::Amqp { amqp_command } => {
amqp_commands::execute_amqp_command(amqp_command).await?;
}
Commands::Data { data_command } => {
handle_data(data_command).await?;
}
Commands::Admin { port, config } => {
handle_admin(port, config).await?;
}
Commands::Sync {
workspace_dir,
config,
} => {
handle_sync(workspace_dir, config).await?;
}
Commands::Quick {
file,
port,
admin,
admin_port,
metrics,
metrics_port,
logging,
host,
} => {
handle_quick(file, port, host, admin, admin_port, metrics, metrics_port, logging)
.await?;
}
Commands::Completions { shell } => {
handle_completions(shell);
}
Commands::Init { name, no_examples } => {
handle_init(name, no_examples).await?;
}
Commands::Wizard => {
let config = wizard::run_wizard().await?;
wizard::generate_project(&config).await?;
}
Commands::Generate {
config,
spec,
output,
verbose,
dry_run,
watch,
watch_debounce,
progress,
} => {
handle_generate(
config,
spec,
output,
verbose,
dry_run,
watch,
watch_debounce,
progress,
)
.await?;
}
Commands::Schema { output } => {
handle_schema(output).await?;
}
Commands::DevSetup { args } => {
dev_setup_commands::execute_dev_setup(args).await?;
}
Commands::Config { config_command } => {
handle_config(config_command).await?;
}
Commands::GitWatch {
repository_url,
branch,
spec_paths,
poll_interval,
auth_token,
cache_dir,
reload_command,
} => {
git_watch_commands::handle_git_watch(
repository_url,
branch,
spec_paths,
poll_interval,
auth_token,
cache_dir,
reload_command,
)
.await?;
}
Commands::ContractSync {
repository_url,
branch,
spec_paths,
mock_config,
auth_token,
cache_dir,
strict,
output,
update,
} => {
contract_sync_commands::handle_contract_sync(
repository_url,
branch,
spec_paths,
mock_config,
auth_token,
cache_dir,
strict,
output,
update,
)
.await?;
}
Commands::ContractDiff { diff_command } => {
handle_contract_diff(diff_command).await?;
}
Commands::Import { import_command } => {
import_commands::handle_import_command(import_command).await?;
}
Commands::TestAi { ai_command } => {
handle_test_ai(ai_command).await?;
}
Commands::Plugin { plugin_command } => {
plugin_commands::handle_plugin_command(plugin_command).await?;
}
Commands::Recorder { recorder_command } => {
recorder_commands::handle_recorder_command(recorder_command).await?;
}
Commands::Flow { flow_command } => {
flow_commands::handle_flow_command(flow_command).await?;
}
Commands::Scenario { scenario_command } => {
scenario_commands::handle_scenario_command(scenario_command).await?;
}
Commands::Template { template_command } => {
template_commands::handle_template_command(template_command).await?;
}
Commands::Blueprint { blueprint_command } => match blueprint_command {
blueprint_commands::BlueprintCommands::List { detailed, category } => {
blueprint_commands::list_blueprints(detailed, category)?;
}
blueprint_commands::BlueprintCommands::Create {
name,
blueprint,
output,
force,
} => {
blueprint_commands::create_from_blueprint(name, blueprint, output, force)?;
}
blueprint_commands::BlueprintCommands::Info { blueprint_id } => {
blueprint_commands::show_blueprint_info(blueprint_id)?;
}
},
Commands::Client { client_command } => {
client_generator::execute_client_command(client_command).await?;
}
Commands::Backend { backend_command } => {
backend_generator::handle_backend_command(backend_command).await?;
}
Commands::Workspace { workspace_command } => {
workspace_commands::handle_workspace_command(workspace_command).await?;
}
Commands::Cloud { cloud_command } => {
cloud_commands::handle_cloud_command(cloud_command)
.await
.map_err(|e| anyhow::anyhow!("Cloud command failed: {}", e))?;
}
Commands::Tunnel { tunnel_command } => {
tunnel_commands::handle_tunnel_command(tunnel_command)
.await
.map_err(|e| anyhow::anyhow!("Tunnel command failed: {}", e))?;
}
Commands::Deploy { deploy_command } => {
deploy_commands::handle_deploy_command(deploy_command)
.await
.map_err(|e| anyhow::anyhow!("Deploy command failed: {}", e))?;
}
Commands::Vbr { vbr_command } => {
vbr_commands::execute_vbr_command(vbr_command)
.await
.map_err(|e| anyhow::anyhow!("VBR command failed: {}", e))?;
}
Commands::Snapshot { snapshot_command } => {
snapshot_commands::handle_snapshot_command(snapshot_command)
.await
.map_err(|e| anyhow::anyhow!("Snapshot command failed: {}", e))?;
}
Commands::Mockai { mockai_command } => {
mockai_commands::handle_mockai_command(mockai_command)
.await
.map_err(|e| anyhow::anyhow!("MockAI command failed: {}", e))?;
}
Commands::Voice { voice_command } => {
voice_commands::handle_voice_command(voice_command)
.await
.map_err(|e| anyhow::anyhow!("Voice command failed: {}", e))?;
}
Commands::Chaos { chaos_command } => {
handle_chaos_command(chaos_command).await?;
}
Commands::Time {
time_command,
admin_url,
} => {
time_commands::execute_time_command(time_command, admin_url)
.await
.map_err(|e| anyhow::anyhow!("Time command failed: {}", e))?;
}
Commands::Orchestrate {
orchestrate_command,
} => {
handle_orchestrate(orchestrate_command).await?;
}
Commands::GenerateTests {
database,
format,
output,
protocol,
method,
path,
status_code,
limit,
suite_name,
base_url,
ai_descriptions,
llm_provider,
llm_model,
llm_endpoint,
llm_api_key,
validate_body,
validate_status,
validate_headers,
validate_timing,
max_duration_ms,
} => {
handle_generate_tests(
database,
format,
output,
protocol,
method,
path,
status_code,
limit,
suite_name,
base_url,
ai_descriptions,
llm_provider,
llm_model,
llm_endpoint,
llm_api_key,
validate_body,
validate_status,
validate_headers,
validate_timing,
max_duration_ms,
)
.await?;
}
Commands::Suggest {
from,
from_description,
format,
output,
num_suggestions,
include_examples,
domain,
llm_provider,
llm_model,
llm_endpoint,
llm_api_key,
temperature,
print_json,
} => {
handle_suggest(
from,
from_description,
format,
output,
num_suggestions,
include_examples,
domain,
llm_provider,
llm_model,
llm_endpoint,
llm_api_key,
temperature,
print_json,
)
.await?;
}
Commands::Bench {
spec,
target,
duration,
vus,
scenario,
operations,
auth,
headers,
output,
generate_only,
script_output,
threshold_percentile,
threshold_ms,
max_error_rate,
verbose,
} => {
let bench_cmd = mockforge_bench::BenchCommand {
spec,
target,
duration,
vus,
scenario,
operations,
auth,
headers,
output,
generate_only,
script_output,
threshold_percentile,
threshold_ms,
max_error_rate,
verbose,
};
if let Err(e) = bench_cmd.execute().await {
eprintln!("Error: {}", e);
std::process::exit(1);
}
}
}
Ok(())
}
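/// Parsed `serve` arguments bundled into one struct so config resolution
/// doesn't take dozens of loose parameters.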
#[derive(Debug)]
struct ServeArgs {
config_path: Option<PathBuf>,
profile: Option<String>,
http_port: Option<u16>,
ws_port: Option<u16>,
grpc_port: Option<u16>,
tcp_port: Option<u16>,
admin: bool,
admin_port: Option<u16>,
metrics: bool,
metrics_port: Option<u16>,
tracing: bool,
tracing_service_name: String,
tracing_environment: String,
jaeger_endpoint: String,
tracing_sampling_rate: f64,
recorder: bool,
recorder_db: String,
recorder_no_api: bool,
recorder_api_port: Option<u16>,
recorder_max_requests: i64,
recorder_retention_days: i64,
chaos: bool,
chaos_scenario: Option<String>,
chaos_latency_ms: Option<u64>,
chaos_latency_range: Option<String>,
chaos_latency_probability: f64,
chaos_http_errors: Option<String>,
chaos_http_error_probability: f64,
chaos_rate_limit: Option<u32>,
chaos_bandwidth_limit: Option<u64>,
chaos_packet_loss: Option<f64>,
spec: Option<PathBuf>,
ws_replay_file: Option<PathBuf>,
graphql: Option<PathBuf>,
graphql_port: Option<u16>,
graphql_upstream: Option<String>,
traffic_shaping: bool,
bandwidth_limit: u64,
burst_size: u64,
ai_enabled: bool,
rag_provider: Option<String>,
rag_model: Option<String>,
rag_api_key: Option<String>,
network_profile: Option<String>,
chaos_random: bool,
chaos_random_error_rate: f64,
chaos_random_delay_rate: f64,
chaos_random_min_delay: u64,
chaos_random_max_delay: u64,
reality_level: Option<u8>,
dry_run: bool,
progress: bool,
verbose: bool,
}
#[cfg(test)]
mod cli_tests {
use super::*;
#[test]
fn parses_admin_port_override() {
let cli = Cli::parse_from([
"mockforge",
"serve",
"--admin",
"--admin-port",
"3100",
"--http-port",
"3200",
"--ws-port",
"3201",
"--grpc-port",
"5200",
]);
match cli.command {
Commands::Serve { admin_port, .. } => assert_eq!(admin_port, Some(3100)),
_ => panic!("expected serve command"),
}
}
}
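/// Resolve the effective `ServerConfig`: load a config file (explicit path or
/// auto-discovered), overlay the requested profile, apply environment
/// overrides, then let individual CLI flags win over everything else.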
async fn build_server_config_from_cli(serve_args: &ServeArgs) -> ServerConfig {
use mockforge_core::config::{
discover_config_file_all_formats, load_config_auto, load_config_with_profile,
};
let mut config = if let Some(path) = &serve_args.config_path {
println!("📄 Loading configuration from: {}", path.display());
match load_config_auto(path).await {
Ok(cfg) => {
if let Some(profile_name) = &serve_args.profile {
match load_config_with_profile(path, Some(profile_name)).await {
Ok(cfg_with_profile) => {
println!("✅ Applied profile: {}", profile_name);
cfg_with_profile
}
Err(e) => {
eprintln!("⚠️ Failed to apply profile '{}': {}", profile_name, e);
eprintln!(" Using base configuration without profile");
cfg
}
}
} else {
cfg
}
}
Err(e) => {
eprintln!("⚠️ Failed to load config file: {}", e);
eprintln!(" Using default configuration");
ServerConfig::default()
}
}
} else {
match discover_config_file_all_formats().await {
Ok(discovered_path) => {
println!("📄 Auto-discovered configuration from: {}", discovered_path.display());
match load_config_auto(&discovered_path).await {
Ok(cfg) => {
if let Some(profile_name) = &serve_args.profile {
match load_config_with_profile(&discovered_path, Some(profile_name))
.await
{
Ok(cfg_with_profile) => {
println!("✅ Applied profile: {}", profile_name);
cfg_with_profile
}
Err(e) => {
eprintln!(
"⚠️ Failed to apply profile '{}': {}",
profile_name, e
);
eprintln!(" Using base configuration without profile");
cfg
}
}
} else {
cfg
}
}
Err(e) => {
eprintln!("⚠️ Failed to load auto-discovered config: {}", e);
ServerConfig::default()
}
}
}
Err(_) => {
if serve_args.profile.is_some() {
eprintln!("⚠️ Profile specified but no config file found");
eprintln!(" Using default configuration");
}
ServerConfig::default()
}
}
};
config = apply_env_overrides(config);
if let Some(http_port) = serve_args.http_port {
config.http.port = http_port;
}
if let Some(spec_path) = &serve_args.spec {
config.http.openapi_spec = Some(spec_path.to_string_lossy().to_string());
}
if let Some(ws_port) = serve_args.ws_port {
config.websocket.port = ws_port;
}
if let Some(replay_path) = &serve_args.ws_replay_file {
config.websocket.replay_file = Some(replay_path.to_string_lossy().to_string());
}
if let Some(graphql_port) = serve_args.graphql_port {
config.graphql.port = graphql_port;
}
if let Some(schema_path) = &serve_args.graphql {
config.graphql.schema_path = Some(schema_path.to_string_lossy().to_string());
}
if let Some(upstream_url) = &serve_args.graphql_upstream {
config.graphql.upstream_url = Some(upstream_url.clone());
}
if let Some(grpc_port) = serve_args.grpc_port {
config.grpc.port = grpc_port;
}
if let Some(tcp_port) = serve_args.tcp_port {
config.tcp.port = tcp_port;
}
if serve_args.admin {
config.admin.enabled = true;
}
if let Some(admin_port) = serve_args.admin_port {
config.admin.port = admin_port;
}
if serve_args.metrics {
config.observability.prometheus.enabled = true;
}
if let Some(metrics_port) = serve_args.metrics_port {
config.observability.prometheus.port = metrics_port;
}
if serve_args.tracing {
config.observability.opentelemetry = Some(mockforge_core::config::OpenTelemetryConfig {
enabled: true,
service_name: serve_args.tracing_service_name.clone(),
environment: serve_args.tracing_environment.clone(),
jaeger_endpoint: serve_args.jaeger_endpoint.clone(),
otlp_endpoint: None,
protocol: "grpc".to_string(),
sampling_rate: serve_args.tracing_sampling_rate,
});
}
if serve_args.recorder {
config.observability.recorder = Some(mockforge_core::config::RecorderConfig {
enabled: true,
database_path: serve_args.recorder_db.clone(),
api_enabled: !serve_args.recorder_no_api,
api_port: serve_args.recorder_api_port,
max_requests: serve_args.recorder_max_requests,
retention_days: serve_args.recorder_retention_days,
record_http: true,
record_grpc: true,
record_websocket: true,
record_graphql: true,
record_proxy: true,
});
}
if serve_args.chaos {
let mut chaos_config = mockforge_core::config::ChaosEngConfig {
enabled: true,
scenario: serve_args.chaos_scenario.clone(),
latency: None,
fault_injection: None,
rate_limit: None,
traffic_shaping: None,
};
if serve_args.chaos_latency_ms.is_some() || serve_args.chaos_latency_range.is_some() {
let random_delay_range_ms = serve_args.chaos_latency_range.as_ref().and_then(|range| {
let parts: Vec<&str> = range.split('-').collect();
if parts.len() == 2 {
let min = parts[0].parse::<u64>().ok()?;
let max = parts[1].parse::<u64>().ok()?;
Some((min, max))
} else {
None
}
});
chaos_config.latency = Some(mockforge_core::config::LatencyInjectionConfig {
enabled: true,
fixed_delay_ms: serve_args.chaos_latency_ms,
random_delay_range_ms,
jitter_percent: 0.0,
probability: serve_args.chaos_latency_probability,
});
}
if serve_args.chaos_http_errors.is_some() {
let http_errors = serve_args
.chaos_http_errors
.as_ref()
.map(|errors| {
errors.split(',').filter_map(|s| s.trim().parse::<u16>().ok()).collect()
})
.unwrap_or_default();
chaos_config.fault_injection = Some(mockforge_core::config::FaultConfig {
enabled: true,
http_errors,
http_error_probability: serve_args.chaos_http_error_probability,
connection_errors: false,
connection_error_probability: 0.0,
timeout_errors: false,
timeout_ms: 30000,
timeout_probability: 0.0,
});
}
if let Some(rps) = serve_args.chaos_rate_limit {
chaos_config.rate_limit = Some(mockforge_core::config::RateLimitingConfig {
enabled: true,
requests_per_second: rps,
burst_size: rps * 2,
per_ip: false,
per_endpoint: false,
});
}
if serve_args.chaos_bandwidth_limit.is_some() || serve_args.chaos_packet_loss.is_some() {
chaos_config.traffic_shaping = Some(mockforge_core::config::NetworkShapingConfig {
enabled: true,
bandwidth_limit_bps: serve_args.chaos_bandwidth_limit.unwrap_or(1_000_000),
packet_loss_percent: serve_args.chaos_packet_loss.unwrap_or(0.0),
max_connections: 100,
});
}
config.observability.chaos = Some(chaos_config);
}
if serve_args.traffic_shaping {
config.core.traffic_shaping_enabled = true;
config.core.traffic_shaping.bandwidth.enabled = true;
config.core.traffic_shaping.bandwidth.max_bytes_per_sec = serve_args.bandwidth_limit;
config.core.traffic_shaping.bandwidth.burst_capacity_bytes = serve_args.burst_size;
}
if serve_args.ai_enabled {
config.data.rag.enabled = true;
if let Some(provider) = &serve_args.rag_provider {
config.data.rag.provider = provider.clone();
}
if let Some(model) = &serve_args.rag_model {
config.data.rag.model = Some(model.clone());
}
if let Some(api_key) = &serve_args.rag_api_key {
config.data.rag.api_key = Some(api_key.clone());
}
}
if let Some(level_value) = serve_args.reality_level {
if let Some(level) = mockforge_core::RealityLevel::from_value(level_value) {
config.reality.level = level;
config.reality.enabled = true;
println!("🎚️ Reality level set to {} ({})", level.value(), level.name());
let reality_engine = mockforge_core::RealityEngine::with_level(level);
reality_engine.apply_to_config(&mut config).await;
} else {
eprintln!(
"⚠️ Invalid reality level: {}. Must be between 1 and 5. Using default.",
level_value
);
}
} else if config.reality.enabled {
let level = config.reality.level;
let reality_engine = mockforge_core::RealityEngine::with_level(level);
reality_engine.apply_to_config(&mut config).await;
}
config
}
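/// Best-effort port check: try to bind each port on 127.0.0.1 and report every
/// conflict at once. This is inherently racy; a port can still be taken
/// between this check and the real bind.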
fn ensure_ports_available(ports: &[(u16, &str)]) -> Result<(), String> {
let mut unavailable_ports = Vec::new();
for (port, name) in ports {
match std::net::TcpListener::bind(("127.0.0.1", *port)) {
Ok(_) => {}
Err(err) => unavailable_ports.push((*port, *name, err)),
}
}
if unavailable_ports.is_empty() {
return Ok(());
}
let mut error_msg = String::from("One or more ports are already in use:\n\n");
for (port, name, err) in &unavailable_ports {
error_msg.push_str(&format!(" • {} port {}: {}\n", name, port, err));
}
error_msg.push_str("\nPossible solutions:\n");
error_msg.push_str(" 1. Stop the process using these ports\n");
error_msg.push_str(" 2. Use different ports with flags like --http-port, --ws-port, etc.\n");
error_msg.push_str(
" 3. Find the process using the port with: lsof -i :<port> or netstat -tulpn | grep <port>\n",
);
Err(error_msg)
}
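/// Pre-flight validation for `serve`: config and spec files must exist and be
/// readable, and the given ports must be free.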
async fn validate_serve_config(
config_path: &Option<PathBuf>,
spec_path: &Option<PathBuf>,
ports: &[(u16, &str)],
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use std::fs;
if let Some(config) = config_path {
if !config.exists() {
return Err(format!(
"Configuration file not found: {}\n\n\
Hint: Check that the path is correct and the file exists.",
config.display()
)
.into());
}
if let Err(e) = fs::read_to_string(config) {
return Err(format!(
"Cannot read configuration file: {}\n\n\
Error: {}\n\
Hint: Check file permissions and ensure the file is readable.",
config.display(),
e
)
.into());
}
}
if let Some(spec) = spec_path {
if !spec.exists() {
return Err(format!(
"OpenAPI spec file not found: {}\n\n\
Hint: Check that the path is correct and the file exists.",
spec.display()
)
.into());
}
if let Err(e) = fs::read_to_string(spec) {
return Err(format!(
"Cannot read OpenAPI spec file: {}\n\n\
Error: {}\n\
Hint: Check file permissions and ensure the file is readable.",
spec.display(),
e
)
.into());
}
}
if let Err(err) = ensure_ports_available(ports) {
return Err(err.into());
}
Ok(())
}
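/// Initialize the OpenTelemetry tracer, preferring an OTLP endpoint when one
/// is configured and falling back to Jaeger otherwise.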
fn initialize_opentelemetry_tracing(
otel_config: &mockforge_core::config::OpenTelemetryConfig,
    _logging_config: &mockforge_observability::LoggingConfig, // unused here; underscored to avoid a warning
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use mockforge_tracing::{init_tracer, TracingConfig};
let tracing_config = if let Some(ref otlp_endpoint) = otel_config.otlp_endpoint {
TracingConfig::with_otlp(otel_config.service_name.clone(), otlp_endpoint.clone())
} else {
TracingConfig::with_jaeger(
otel_config.service_name.clone(),
otel_config.jaeger_endpoint.clone(),
)
}
.with_sampling_rate(otel_config.sampling_rate)
.with_environment(otel_config.environment.clone());
let _tracer = init_tracer(tracing_config)?;
tracing::info!("OpenTelemetry tracing initialized successfully");
Ok(())
}
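/// Handle `mockforge serve`: resolve the effective config, run pre-flight
/// validation (honoring --dry-run), then start the configured servers along
/// with optional observability, chaos, and security subsystems.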
#[allow(clippy::too_many_arguments)]
pub async fn handle_serve(
config_path: Option<PathBuf>,
profile: Option<String>,
http_port: Option<u16>,
ws_port: Option<u16>,
grpc_port: Option<u16>,
_smtp_port: Option<u16>,
tcp_port: Option<u16>,
admin: bool,
admin_port: Option<u16>,
metrics: bool,
metrics_port: Option<u16>,
tracing: bool,
tracing_service_name: String,
tracing_environment: String,
jaeger_endpoint: String,
tracing_sampling_rate: f64,
recorder: bool,
recorder_db: String,
recorder_no_api: bool,
recorder_api_port: Option<u16>,
recorder_max_requests: i64,
recorder_retention_days: i64,
chaos: bool,
chaos_scenario: Option<String>,
chaos_latency_ms: Option<u64>,
chaos_latency_range: Option<String>,
chaos_latency_probability: f64,
chaos_http_errors: Option<String>,
chaos_http_error_probability: f64,
chaos_rate_limit: Option<u32>,
chaos_bandwidth_limit: Option<u64>,
chaos_packet_loss: Option<f64>,
spec: Option<PathBuf>,
ws_replay_file: Option<PathBuf>,
graphql: Option<PathBuf>,
graphql_port: Option<u16>,
graphql_upstream: Option<String>,
traffic_shaping: bool,
bandwidth_limit: u64,
burst_size: u64,
network_profile: Option<String>,
chaos_random: bool,
chaos_random_error_rate: f64,
chaos_random_delay_rate: f64,
chaos_random_min_delay: u64,
chaos_random_max_delay: u64,
chaos_profile: Option<String>,
ai_enabled: bool,
reality_level: Option<u8>,
rag_provider: Option<String>,
rag_model: Option<String>,
rag_api_key: Option<String>,
dry_run: bool,
progress: bool,
verbose: bool,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Use the explicit --config path when given; otherwise look for a
    // well-known config file name in the current directory.
    let effective_config_path = config_path.clone().or_else(|| {
        let current_dir = std::env::current_dir().ok()?;
        ["mockforge.yaml", "mockforge.yml", ".mockforge.yaml", ".mockforge.yml"]
            .into_iter()
            .map(|name| current_dir.join(name))
            .find(|path| path.exists())
    });
let serve_args = ServeArgs {
config_path: effective_config_path.clone(),
profile,
http_port,
ws_port,
grpc_port,
tcp_port,
admin,
admin_port,
metrics,
metrics_port,
tracing,
tracing_service_name,
tracing_environment,
jaeger_endpoint,
tracing_sampling_rate,
recorder,
recorder_db,
recorder_no_api,
recorder_api_port,
recorder_max_requests,
recorder_retention_days,
chaos,
chaos_scenario,
chaos_latency_ms,
chaos_latency_range,
chaos_latency_probability,
chaos_http_errors,
chaos_http_error_probability,
chaos_rate_limit,
chaos_bandwidth_limit,
chaos_packet_loss,
spec,
ws_replay_file,
graphql,
graphql_port,
graphql_upstream,
traffic_shaping,
bandwidth_limit,
burst_size,
ai_enabled,
rag_provider,
rag_model,
rag_api_key,
network_profile,
chaos_random,
chaos_random_error_rate,
chaos_random_delay_rate,
chaos_random_min_delay,
chaos_random_max_delay,
reality_level: reality_level.or_else(|| {
std::env::var("MOCKFORGE_REALITY_LEVEL").ok().and_then(|v| v.parse::<u8>().ok())
}),
dry_run,
progress,
verbose,
};
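    // Validate config and spec files up front; the port list is empty here
    // because the final ports are only known once the config is resolved
    // (they are re-checked below).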
validate_serve_config(&serve_args.config_path, &serve_args.spec, &[]).await?;
let mut config = build_server_config_from_cli(&serve_args).await;
let mut final_ports = vec![
(config.http.port, "HTTP"),
(config.websocket.port, "WebSocket"),
(config.grpc.port, "gRPC"),
];
if config.admin.enabled {
final_ports.push((config.admin.port, "Admin UI"));
}
if config.observability.prometheus.enabled {
final_ports.push((config.observability.prometheus.port, "Metrics"));
}
if let Err(port_error) = ensure_ports_available(&final_ports) {
return Err(port_error.into());
}
if serve_args.dry_run {
println!("✅ Configuration validation passed!");
println!("✅ All required ports are available");
if serve_args.config_path.is_some() {
println!("✅ Configuration file is valid");
}
if serve_args.spec.is_some() {
println!("✅ OpenAPI spec file is valid");
}
println!("\n🎉 Dry run successful - no issues found!");
return Ok(());
}
if !config.routes.is_empty() {
println!("📄 Found {} routes in config", config.routes.len());
} else {
println!("📄 No routes found in config");
}
if let Some(profile_name) = serve_args.network_profile {
use mockforge_core::NetworkProfileCatalog;
let catalog = NetworkProfileCatalog::new();
if let Some(profile) = catalog.get(&profile_name) {
println!("📡 Applying network profile: {} - {}", profile.name, profile.description);
let (latency_profile, traffic_shaping_config) = profile.apply();
config.core.default_latency = latency_profile;
config.core.latency_enabled = true;
config.core.traffic_shaping = traffic_shaping_config;
config.core.traffic_shaping_enabled = true;
} else {
eprintln!("⚠️ Warning: Unknown network profile '{}'. Use --list-network-profiles to see available profiles.", profile_name);
}
}
if serve_args.chaos_random {
use mockforge_core::ChaosConfig;
println!("🎲 Random chaos mode enabled");
println!(" Error rate: {:.1}%", chaos_random_error_rate * 100.0);
println!(" Delay rate: {:.1}%", chaos_random_delay_rate * 100.0);
println!(" Delay range: {}-{} ms", chaos_random_min_delay, chaos_random_max_delay);
let chaos_config = ChaosConfig::new(chaos_random_error_rate, chaos_random_delay_rate)
.with_delay_range(chaos_random_min_delay, chaos_random_max_delay);
config.core.chaos_random = Some(chaos_config);
}
let logging_config = mockforge_observability::LoggingConfig {
level: config.logging.level.clone(),
json_format: config.logging.json_format,
file_path: config.logging.file_path.as_ref().map(|p| p.into()),
max_file_size_mb: config.logging.max_file_size_mb,
max_files: config.logging.max_files,
};
if let Some(ref otel_config) = config.observability.opentelemetry {
if otel_config.enabled {
if let Err(e) = initialize_opentelemetry_tracing(otel_config, &logging_config) {
tracing::warn!("Failed to initialize OpenTelemetry tracing: {}", e);
if let Err(e) = mockforge_observability::init_logging(logging_config) {
eprintln!("Failed to initialize logging: {}", e);
}
}
}
}
println!("🚀 Starting MockForge servers...");
println!("📡 HTTP server on port {}", config.http.port);
println!("🔌 WebSocket server on port {}", config.websocket.port);
println!("⚡ gRPC server on port {}", config.grpc.port);
if config.tcp.enabled {
println!("🔌 TCP server on port {}", config.tcp.port);
}
if config.admin.enabled {
println!("🎛️ Admin UI on port {}", config.admin.port);
}
if config.observability.prometheus.enabled {
println!("📊 Metrics endpoint on port {}", config.observability.prometheus.port);
}
if let Some(ref tracing_config) = config.observability.opentelemetry {
if tracing_config.enabled {
println!("🔍 OpenTelemetry tracing enabled");
println!(" Service: {}", tracing_config.service_name);
println!(" Environment: {}", tracing_config.environment);
println!(" Jaeger endpoint: {}", tracing_config.jaeger_endpoint);
}
}
if let Some(ref recorder_config) = config.observability.recorder {
if recorder_config.enabled {
println!("📹 API Flight Recorder enabled");
println!(" Database: {}", recorder_config.database_path);
println!(" Max requests: {}", recorder_config.max_requests);
}
}
if let Some(ref chaos_config) = config.observability.chaos {
if chaos_config.enabled {
println!("🌀 Chaos engineering enabled");
if let Some(ref scenario) = chaos_config.scenario {
println!(" Scenario: {}", scenario);
}
}
}
if config.data.rag.enabled {
println!("🧠 AI features enabled");
println!(" Provider: {}", config.data.rag.provider);
if let Some(ref model) = config.data.rag.model {
println!(" Model: {}", model);
}
}
if config.core.traffic_shaping_enabled {
println!("🚦 Traffic shaping enabled");
println!(
" Bandwidth limit: {} bytes/sec",
config.core.traffic_shaping.bandwidth.max_bytes_per_sec
);
}
if let Some(ref api_key) = config.data.rag.api_key {
std::env::set_var("MOCKFORGE_RAG_API_KEY", api_key);
}
std::env::set_var("MOCKFORGE_RAG_PROVIDER", &config.data.rag.provider);
if let Some(ref model) = config.data.rag.model {
std::env::set_var("MOCKFORGE_RAG_MODEL", model);
}
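    // Set up the global encryption key store (mockforge_core::encryption).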
init_key_store();
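    // Initialize the request-capture manager (capped at 1000 requests) in the
    // background so startup isn't blocked.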
tokio::spawn(async {
use mockforge_core::request_capture::init_global_capture_manager;
        init_global_capture_manager(1000);
        tracing::info!(
"Request capture manager initialized for contract diff analysis (lazy-loaded)"
);
});
let siem_config = config.security.monitoring.siem.clone();
if siem_config.enabled {
use mockforge_core::security::init_global_siem_emitter;
tokio::spawn(async move {
if let Err(e) = init_global_siem_emitter(siem_config.clone()).await {
tracing::warn!("Failed to initialize SIEM emitter: {}", e);
} else {
tracing::info!(
"SIEM emitter initialized with {} destinations (lazy-loaded)",
siem_config.destinations.len()
);
}
});
}
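    // Optionally start the periodic access-review scheduler with notification
    // fan-out, registering a global review service for other components.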
let access_review_scheduler_handle = if config.security.monitoring.access_review.enabled {
use mockforge_core::security::{
access_review::{AccessReviewConfig, AccessReviewEngine},
access_review_notifications::{AccessReviewNotificationService, NotificationConfig},
access_review_scheduler::AccessReviewScheduler,
access_review_service::AccessReviewService,
api_tokens::InMemoryApiTokenStorage,
justification_storage::InMemoryJustificationStorage,
mfa_tracking::InMemoryMfaStorage,
};
use std::sync::Arc;
use tokio::sync::RwLock;
        // In-memory storage backends, constructed here but not yet wired into
        // the review service; underscore-prefixed to silence unused warnings.
        let _token_storage: Arc<dyn mockforge_core::security::ApiTokenStorage> =
            Arc::new(InMemoryApiTokenStorage::new());
        let _mfa_storage: Arc<dyn mockforge_core::security::MfaStorage> =
            Arc::new(InMemoryMfaStorage::new());
        let _justification_storage: Arc<dyn mockforge_core::security::JustificationStorage> =
            Arc::new(InMemoryJustificationStorage::new());
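        // Stub provider: every lookup returns empty data, which keeps the
        // review machinery runnable without a real identity backend.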
struct SimpleUserDataProvider;
#[async_trait::async_trait]
impl mockforge_core::security::UserDataProvider for SimpleUserDataProvider {
async fn get_all_users(
&self,
) -> Result<Vec<mockforge_core::security::UserAccessInfo>, mockforge_core::Error>
{
Ok(Vec::new())
}
async fn get_privileged_users(
&self,
) -> Result<Vec<mockforge_core::security::PrivilegedAccessInfo>, mockforge_core::Error>
{
Ok(Vec::new())
}
async fn get_api_tokens(
&self,
) -> Result<Vec<mockforge_core::security::ApiTokenInfo>, mockforge_core::Error>
{
Ok(Vec::new())
}
async fn get_user(
&self,
_user_id: uuid::Uuid,
) -> Result<Option<mockforge_core::security::UserAccessInfo>, mockforge_core::Error>
{
Ok(None)
}
async fn get_last_login(
&self,
_user_id: uuid::Uuid,
) -> Result<Option<chrono::DateTime<chrono::Utc>>, mockforge_core::Error> {
Ok(None)
}
async fn revoke_user_access(
&self,
_user_id: uuid::Uuid,
_reason: String,
) -> Result<(), mockforge_core::Error> {
Ok(())
}
async fn update_user_permissions(
&self,
_user_id: uuid::Uuid,
_roles: Vec<String>,
_permissions: Vec<String>,
) -> Result<(), mockforge_core::Error> {
Ok(())
}
}
let user_provider = SimpleUserDataProvider;
let review_config = config.security.monitoring.access_review.clone();
let review_config_for_scheduler = review_config.clone();
let engine = AccessReviewEngine::new(review_config.clone());
let review_service = AccessReviewService::new(engine, Box::new(user_provider));
let review_service_arc = Arc::new(RwLock::new(review_service));
let notification_config = NotificationConfig {
enabled: review_config.notifications.enabled,
channels: review_config
.notifications
.channels
.iter()
.map(|c| match c.as_str() {
"email" => mockforge_core::security::access_review_notifications::NotificationChannel::Email,
"slack" => mockforge_core::security::access_review_notifications::NotificationChannel::Slack,
"webhook" => mockforge_core::security::access_review_notifications::NotificationChannel::Webhook,
_ => mockforge_core::security::access_review_notifications::NotificationChannel::InApp,
})
.collect(),
recipients: review_config.notifications.recipients,
channel_config: std::collections::HashMap::new(),
};
let notification_service =
Arc::new(AccessReviewNotificationService::new(notification_config));
use mockforge_core::security::init_global_access_review_service;
if let Err(e) = init_global_access_review_service(review_service_arc.clone()).await {
tracing::warn!("Failed to initialize global access review service: {}", e);
} else {
tracing::info!("Global access review service initialized");
}
let scheduler = AccessReviewScheduler::with_notifications(
review_service_arc,
review_config_for_scheduler,
Some(notification_service),
);
let handle = scheduler.start();
tracing::info!("Access review scheduler started");
Some(handle)
} else {
None
};
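// Privileged access: only constructed when MFA is required; a background
// task sweeps expired privileged sessions every 300 seconds.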
let privileged_access_manager = if config.security.monitoring.privileged_access.require_mfa {
use mockforge_core::security::{
justification_storage::InMemoryJustificationStorage,
mfa_tracking::InMemoryMfaStorage,
privileged_access::{PrivilegedAccessConfig, PrivilegedAccessManager},
};
use std::sync::Arc;
let privileged_config = config.security.monitoring.privileged_access.clone();
let mfa_storage: Arc<dyn mockforge_core::security::MfaStorage> =
Arc::new(InMemoryMfaStorage::new());
let justification_storage: Arc<dyn mockforge_core::security::JustificationStorage> =
Arc::new(InMemoryJustificationStorage::new());
let manager = PrivilegedAccessManager::new(
privileged_config,
Some(mfa_storage),
Some(justification_storage),
);
let manager_for_cleanup = Arc::new(RwLock::new(manager));
let cleanup_manager = manager_for_cleanup.clone();
tokio::spawn(async move {
let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(300));
loop {
interval.tick().await;
if let Err(e) = cleanup_manager.write().await.cleanup_expired_sessions().await {
tracing::warn!("Failed to cleanup expired privileged sessions: {}", e);
}
}
});
use mockforge_core::security::init_global_privileged_access_manager;
if let Err(e) = init_global_privileged_access_manager(manager_for_cleanup.clone()).await {
tracing::warn!("Failed to initialize global privileged access manager: {}", e);
} else {
tracing::info!("Global privileged access manager initialized");
}
tracing::info!("Privileged access manager initialized");
Some(manager_for_cleanup)
} else {
None
};
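// Change management, compliance dashboard, and risk assessment all follow
// the same shape: build the engine from config, wrap it in Arc<RwLock<_>>,
// and register it as a process-wide singleton.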
let change_management_engine = if config.security.monitoring.change_management.enabled {
use mockforge_core::security::change_management::{
ChangeManagementConfig, ChangeManagementEngine,
};
use std::sync::Arc;
let change_config = config.security.monitoring.change_management.clone();
let engine = ChangeManagementEngine::new(change_config);
let engine_arc = Arc::new(RwLock::new(engine));
use mockforge_core::security::init_global_change_management_engine;
if let Err(e) = init_global_change_management_engine(engine_arc.clone()).await {
tracing::warn!("Failed to initialize global change management engine: {}", e);
} else {
tracing::info!("Global change management engine initialized");
}
tracing::info!("Change management engine initialized");
Some(engine_arc)
} else {
None
};
let compliance_dashboard_engine = if config.security.monitoring.compliance_dashboard.enabled {
use mockforge_core::security::compliance_dashboard::{
ComplianceDashboardConfig, ComplianceDashboardEngine,
};
use std::sync::Arc;
let dashboard_config = config.security.monitoring.compliance_dashboard.clone();
let engine = ComplianceDashboardEngine::new(dashboard_config);
let engine_arc = Arc::new(RwLock::new(engine));
use mockforge_core::security::init_global_compliance_dashboard_engine;
if let Err(e) = init_global_compliance_dashboard_engine(engine_arc.clone()).await {
tracing::warn!("Failed to initialize global compliance dashboard engine: {}", e);
} else {
tracing::info!("Global compliance dashboard engine initialized");
}
tracing::info!("Compliance dashboard engine initialized");
Some(engine_arc)
} else {
None
};
let risk_assessment_engine = if config.security.monitoring.risk_assessment.enabled {
use mockforge_core::security::risk_assessment::{
RiskAssessmentConfig, RiskAssessmentEngine,
};
use std::sync::Arc;
let risk_config = config.security.monitoring.risk_assessment.clone();
let engine = RiskAssessmentEngine::new(risk_config);
let engine_arc = Arc::new(RwLock::new(engine));
use mockforge_core::security::init_global_risk_assessment_engine;
if let Err(e) = init_global_risk_assessment_engine(engine_arc.clone()).await {
tracing::warn!("Failed to initialize global risk assessment engine: {}", e);
} else {
tracing::info!("Global risk assessment engine initialized");
}
tracing::info!("Risk assessment engine initialized");
Some(engine_arc)
} else {
None
};
let multi_tenant_config = if config.multi_tenant.enabled {
Some(config.multi_tenant.clone())
} else {
None
};
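// Protocol fixture registries (feature-gated): when a protocol is compiled
// in and enabled, fixtures are loaded up front; otherwise the slot is None
// so the HTTP router wiring below stays uniform across feature sets.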
#[cfg(feature = "smtp")]
let smtp_registry = if config.smtp.enabled {
use mockforge_smtp::SmtpSpecRegistry;
use std::sync::Arc;
let mut registry = SmtpSpecRegistry::new();
if let Some(fixtures_dir) = &config.smtp.fixtures_dir {
if fixtures_dir.exists() {
if let Err(e) = registry.load_fixtures(fixtures_dir) {
eprintln!(
"⚠️ Warning: Failed to load SMTP fixtures from {:?}: {}",
fixtures_dir, e
);
} else {
println!(" Loaded SMTP fixtures from {:?}", fixtures_dir);
}
} else {
println!(" No SMTP fixtures directory found at {:?}", fixtures_dir);
}
}
Some(Arc::new(registry) as Arc<dyn Any + Send + Sync>)
} else {
None
};
#[cfg(not(feature = "smtp"))]
let smtp_registry = None::<Arc<dyn std::any::Any + Send + Sync>>;
#[cfg(feature = "mqtt")]
let mqtt_registry = if config.mqtt.enabled {
use mockforge_mqtt::MqttSpecRegistry;
use std::sync::Arc;
let mut registry = MqttSpecRegistry::new();
if let Some(fixtures_dir) = &config.mqtt.fixtures_dir {
if fixtures_dir.exists() {
if let Err(e) = registry.load_fixtures(fixtures_dir) {
eprintln!(
"⚠️ Warning: Failed to load MQTT fixtures from {:?}: {}",
fixtures_dir, e
);
} else {
println!(" Loaded MQTT fixtures from {:?}", fixtures_dir);
}
} else {
println!(" No MQTT fixtures directory found at {:?}", fixtures_dir);
}
}
Some(Arc::new(registry))
} else {
None
};
#[cfg(feature = "mqtt")]
let mqtt_broker = if let Some(ref registry_ref) = mqtt_registry {
let mqtt_config = config.mqtt.clone();
let broker_config = mockforge_mqtt::broker::MqttConfig {
port: mqtt_config.port,
host: mqtt_config.host.clone(),
max_connections: mqtt_config.max_connections,
max_packet_size: mqtt_config.max_packet_size,
keep_alive_secs: mqtt_config.keep_alive_secs,
version: mockforge_mqtt::broker::MqttVersion::default(),
};
Some(Arc::new(mockforge_mqtt::MqttBroker::new(
broker_config.clone(),
registry_ref.clone(),
)))
} else {
None
};
#[cfg(feature = "mqtt")]
let mqtt_broker_for_http = mqtt_broker
.as_ref()
.map(|broker| Arc::clone(broker) as Arc<dyn Any + Send + Sync>);
#[cfg(not(feature = "mqtt"))]
let mqtt_broker_for_http = None::<Arc<dyn Any + Send + Sync>>;
use mockforge_http::HealthManager;
use std::sync::Arc;
use std::time::Duration;
let health_manager = Arc::new(HealthManager::with_init_timeout(Duration::from_secs(60)));
let health_manager_for_router = health_manager.clone();
use mockforge_core::TimeTravelManager;
use mockforge_ui::time_travel_handlers;
let time_travel_manager = {
let time_travel_config = config.core.time_travel.clone();
let manager = Arc::new(TimeTravelManager::new(time_travel_config));
time_travel_handlers::init_time_travel_manager(manager.clone());
if manager.clock().is_enabled() {
println!("⏰ Time travel enabled");
if let Some(virtual_time) = manager.clock().status().current_time {
println!(" Virtual time: {}", virtual_time);
}
println!(" Scale factor: {}x", manager.clock().get_scale());
}
let cron_scheduler = manager.cron_scheduler();
tokio::spawn(async move {
let mut interval = tokio::time::interval(std::time::Duration::from_secs(1));
loop {
interval.tick().await;
if let Err(e) = cron_scheduler.check_and_execute().await {
tracing::warn!("Error checking cron jobs: {}", e);
}
}
});
manager
};
use mockforge_vbr::MutationRuleManager;
let mutation_rule_manager = Arc::new(MutationRuleManager::new());
time_travel_handlers::init_mutation_rule_manager(mutation_rule_manager.clone());
let mockai = if config.mockai.enabled {
use mockforge_core::intelligent_behavior::{IntelligentBehaviorConfig, MockAI};
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::{info, warn};
let behavior_config = config.mockai.intelligent_behavior.clone();
let spec_path = config.http.openapi_spec.clone();
let mockai_arc = Arc::new(RwLock::new(MockAI::new(behavior_config.clone())));
let mockai_for_upgrade = mockai_arc.clone();
let behavior_config_for_upgrade = behavior_config.clone();
tokio::spawn(async move {
if let Some(ref spec_path) = spec_path {
match mockforge_core::openapi::OpenApiSpec::from_file(spec_path).await {
Ok(openapi_spec) => {
match MockAI::from_openapi(&openapi_spec, behavior_config_for_upgrade).await
{
Ok(instance) => {
*mockai_for_upgrade.write().await = instance;
info!("✅ MockAI upgraded with OpenAPI spec (background initialization)");
}
Err(e) => {
warn!("Failed to upgrade MockAI from OpenAPI spec: {}", e);
}
}
}
Err(e) => {
warn!("Failed to load OpenAPI spec for MockAI: {}", e);
}
}
}
});
Some(mockai_arc)
} else {
None
};
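// OpenAPI request validation mode, mapped from the config string:
// "off" | "disable" | "disabled" -> Disabled, "warn" | "warning" -> Warn,
// anything else -> Enforce (also the default when no validation is set).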
use mockforge_core::openapi_routes::{ValidationMode, ValidationOptions};
let request_mode = if let Some(ref validation) = config.http.validation {
match validation.mode.as_str() {
"off" | "disable" | "disabled" => ValidationMode::Disabled,
"warn" | "warning" => ValidationMode::Warn,
_ => ValidationMode::Enforce,
}
} else {
ValidationMode::Enforce
};
let validation_options = ValidationOptions {
request_mode,
aggregate_errors: config.http.aggregate_validation_errors,
validate_responses: config.http.validate_responses,
overrides: std::collections::HashMap::new(),
admin_skip_prefixes: vec!["/__mockforge".to_string(), "/health".to_string()],
response_template_expand: config.http.response_template_expand,
validation_status: config.http.validation_status,
};
let mut http_app = mockforge_http::build_router_with_chains_and_multi_tenant(
config.http.openapi_spec.clone(),
Some(validation_options),
None,
multi_tenant_config,
Some(config.routes.clone()),
config.http.cors.clone(),
None,
smtp_registry.as_ref().cloned(),
mqtt_broker_for_http,
None,
false,
Some(health_manager_for_router),
mockai.clone(),
Some(config.deceptive_deploy.clone()),
None,
)
.await;
let chaos_config = if let Some(ref chaos_eng_config) = config.observability.chaos {
ChaosConfig {
enabled: chaos_eng_config.enabled,
latency: chaos_eng_config.latency.as_ref().map(|l| {
mockforge_chaos::config::LatencyConfig {
enabled: l.enabled,
fixed_delay_ms: l.fixed_delay_ms,
random_delay_range_ms: l.random_delay_range_ms,
jitter_percent: l.jitter_percent,
probability: l.probability,
}
}),
fault_injection: chaos_eng_config.fault_injection.as_ref().map(|f| {
mockforge_chaos::config::FaultInjectionConfig {
enabled: f.enabled,
http_errors: f.http_errors.clone(),
http_error_probability: f.http_error_probability,
connection_errors: f.connection_errors,
connection_error_probability: f.connection_error_probability,
timeout_errors: f.timeout_errors,
timeout_ms: f.timeout_ms,
timeout_probability: f.timeout_probability,
partial_responses: false,
partial_response_probability: 0.0,
payload_corruption: false,
payload_corruption_probability: 0.0,
corruption_type: mockforge_chaos::config::CorruptionType::None,
error_pattern: None,
mockai_enabled: false,
}
}),
rate_limit: chaos_eng_config.rate_limit.as_ref().map(|r| {
mockforge_chaos::config::RateLimitConfig {
enabled: r.enabled,
requests_per_second: r.requests_per_second,
burst_size: r.burst_size,
per_ip: r.per_ip,
per_endpoint: r.per_endpoint,
}
}),
traffic_shaping: chaos_eng_config.traffic_shaping.as_ref().map(|t| {
mockforge_chaos::config::TrafficShapingConfig {
enabled: t.enabled,
bandwidth_limit_bps: t.bandwidth_limit_bps,
packet_loss_percent: t.packet_loss_percent,
max_connections: 0,
connection_timeout_ms: 30000,
}
}),
circuit_breaker: None,
bulkhead: None,
}
} else {
ChaosConfig::default()
};
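// Chaos Engineering API: the router is always mounted under /api/chaos/*,
// while the request-shaping middleware below is only layered in when chaos
// is enabled. A sketch of driving it over HTTP (port and profile name are
// illustrative, not defaults):
//
//   curl http://localhost:3000/api/chaos/profiles
//   curl -X POST http://localhost:3000/api/chaos/profiles/slow-network/apply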
let (chaos_router, chaos_config_arc, latency_tracker, chaos_api_state) =
create_chaos_api_router(chaos_config.clone(), mockai.clone());
http_app = http_app.merge(chaos_router);
println!("✅ Chaos Engineering API available at /api/chaos/*");
let chaos_api_state_for_admin = chaos_api_state.clone();
if chaos_config.enabled {
use axum::middleware::from_fn;
use mockforge_chaos::middleware::{chaos_middleware_with_state, ChaosMiddleware};
use std::sync::{Arc, OnceLock};
let chaos_middleware_instance =
Arc::new(ChaosMiddleware::new(chaos_config_arc.clone(), latency_tracker));
let middleware_init = chaos_middleware_instance.clone();
tokio::spawn(async move {
middleware_init.init_from_config().await;
});
static CHAOS_MIDDLEWARE: OnceLock<Arc<ChaosMiddleware>> = OnceLock::new();
let _ = CHAOS_MIDDLEWARE.set(chaos_middleware_instance.clone());
struct SendSafeWrapper<F>(F);
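// SAFETY (assumption, not proven here): this blanket impl asserts Send for
// any wrapped future. It exists to satisfy axum's Send bound on middleware
// futures and is only sound if the chaos middleware future carries no
// thread-affine state; revisit if the middleware implementation changes.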
unsafe impl<F> Send for SendSafeWrapper<F> {}
impl<F: std::future::Future<Output = axum::response::Response>> std::future::Future
for SendSafeWrapper<F>
{
type Output = axum::response::Response;
fn poll(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Self::Output> {
unsafe { std::pin::Pin::new_unchecked(&mut self.get_unchecked_mut().0).poll(cx) }
}
}
http_app =
http_app.layer(from_fn(|req: axum::extract::Request, next: axum::middleware::Next| {
SendSafeWrapper(async move {
let state = CHAOS_MIDDLEWARE
.get()
.expect("Chaos middleware should be initialized")
.clone();
chaos_middleware_with_state(state, req, next).await
})
}));
println!("✅ Chaos middleware integrated - latency recording enabled");
}
println!(
"✅ HTTP server configured with health checks at http://localhost:{}/health (live, ready, startup)",
config.http.port
);
if !config.routes.is_empty() {
println!("✅ Loaded {} custom routes", config.routes.len());
}
println!("✅ WebSocket server configured at ws://localhost:{}/ws", config.websocket.port);
println!("✅ gRPC server configured at localhost:{}", config.grpc.port);
if config.admin.enabled {
println!("✅ Admin UI configured at http://localhost:{}", config.admin.port);
}
println!("💡 Press Ctrl+C to stop");
let metrics_registry = std::sync::Arc::new(MetricsRegistry::new());
if config.observability.prometheus.enabled {
use mockforge_observability::{get_global_registry, SystemMetricsConfig};
let system_metrics_config = SystemMetricsConfig {
enabled: true,
interval_seconds: 15,
};
mockforge_observability::system_metrics::start_with_config(
get_global_registry(),
system_metrics_config,
);
println!("📈 System metrics collector started (interval: 15s)");
}
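// Cooperative shutdown: one CancellationToken fans out to every server task
// below, and each task races its serve future against token cancellation.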
use tokio_util::sync::CancellationToken;
let shutdown_token = CancellationToken::new();
let health_manager_for_shutdown = health_manager.clone();
let shutdown_token_for_health = shutdown_token.clone();
tokio::spawn(async move {
shutdown_token_for_health.cancelled().await;
health_manager_for_shutdown.trigger_shutdown().await;
});
let http_port = config.http.port;
let http_tls_config = config.http.tls.clone();
let http_shutdown = shutdown_token.clone();
let http_handle = tokio::spawn(async move {
if let Some(ref tls) = http_tls_config {
if tls.enabled {
println!("🔒 HTTPS server listening on https://localhost:{}", http_port);
} else {
println!("📡 HTTP server listening on http://localhost:{}", http_port);
}
} else {
println!("📡 HTTP server listening on http://localhost:{}", http_port);
}
tokio::select! {
result = mockforge_http::serve_router_with_tls(http_port, http_app, http_tls_config) => {
result.map_err(|e| format!("HTTP server error: {}", e))
}
_ = http_shutdown.cancelled() => {
Ok(())
}
}
});
let ws_port = config.websocket.port;
let ws_host = config.websocket.host.clone();
let ws_shutdown = shutdown_token.clone();
let ws_handle = tokio::spawn(async move {
println!("🔌 WebSocket server listening on ws://{}:{}", ws_host, ws_port);
tokio::select! {
result = mockforge_ws::start_with_latency_and_host(ws_port, &ws_host, None) => {
result.map_err(|e| format!("WebSocket server error: {}", e))
}
_ = ws_shutdown.cancelled() => {
Ok(())
}
}
});
let grpc_port = config.grpc.port;
let grpc_enabled = config.grpc.enabled;
let grpc_shutdown = shutdown_token.clone();
let grpc_handle = if grpc_enabled && grpc_port != 0 {
tokio::spawn(async move {
println!("⚡ gRPC server listening on localhost:{}", grpc_port);
tokio::select! {
result = mockforge_grpc::start(grpc_port) => {
result.map_err(|e| format!("gRPC server error: {}", e))
}
_ = grpc_shutdown.cancelled() => {
Ok(())
}
}
})
} else {
tracing::debug!("gRPC server disabled (enabled: {}, port: {})", grpc_enabled, grpc_port);
tokio::spawn(async move {
grpc_shutdown.cancelled().await;
Ok(())
})
};
#[cfg(feature = "smtp")]
let _smtp_handle = if let Some(ref smtp_registry) = smtp_registry {
let smtp_config = config.smtp.clone();
let smtp_shutdown = shutdown_token.clone();
let server_config = mockforge_smtp::SmtpConfig {
port: smtp_config.port,
host: smtp_config.host.clone(),
hostname: smtp_config.hostname.clone(),
fixtures_dir: smtp_config.fixtures_dir.clone(),
timeout_secs: smtp_config.timeout_secs,
max_connections: smtp_config.max_connections,
enable_mailbox: smtp_config.enable_mailbox,
max_mailbox_messages: smtp_config.max_mailbox_messages,
enable_starttls: smtp_config.enable_starttls,
tls_cert_path: smtp_config.tls_cert_path.clone(),
tls_key_path: smtp_config.tls_key_path.clone(),
};
let smtp_reg = match smtp_registry.clone().downcast::<mockforge_smtp::SmtpSpecRegistry>() {
Ok(reg) => reg,
Err(_) => {
use crate::progress::{CliError, ExitCode};
CliError::new(
"SMTP registry type mismatch - failed to downcast registry".to_string(),
ExitCode::ConfigurationError,
)
.with_suggestion(
"Ensure SMTP registry is properly configured and initialized".to_string(),
)
.display_and_exit();
}
};
Some(tokio::spawn(async move {
println!("📧 SMTP server listening on {}:{}", smtp_config.host, smtp_config.port);
tokio::select! {
result = async {
let server = mockforge_smtp::SmtpServer::new(server_config, smtp_reg)?;
server.start().await
} => {
result.map_err(|e| format!("SMTP server error: {}", e))
}
_ = smtp_shutdown.cancelled() => {
println!("🛑 Shutting down SMTP server...");
Ok(())
}
}
}))
} else {
None
};
#[cfg(feature = "mqtt")]
let _mqtt_handle = if let Some(ref _mqtt_registry) = mqtt_registry {
let mqtt_config = config.mqtt.clone();
let mqtt_shutdown = shutdown_token.clone();
let broker_config = mockforge_mqtt::broker::MqttConfig {
port: mqtt_config.port,
host: mqtt_config.host.clone(),
max_connections: mqtt_config.max_connections,
max_packet_size: mqtt_config.max_packet_size,
keep_alive_secs: mqtt_config.keep_alive_secs,
version: mockforge_mqtt::broker::MqttVersion::default(),
};
Some(tokio::spawn(async move {
use mockforge_mqtt::start_mqtt_server;
println!("📡 MQTT broker listening on {}:{}", mqtt_config.host, mqtt_config.port);
tokio::select! {
result = start_mqtt_server(broker_config) => {
result.map_err(|e| format!("MQTT server error: {:?}", e))
}
_ = mqtt_shutdown.cancelled() => {
println!("🛑 Shutting down MQTT broker...");
Ok(())
}
}
}))
} else {
None
};
#[cfg(not(feature = "mqtt"))]
let _mqtt_handle: Option<tokio::task::JoinHandle<Result<(), String>>> = None;
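// Deceptive-deploy tunnel: started ~2s after the HTTP server so there is
// something to expose. The tunnel endpoint and credentials can come from the
// environment; a sketch (the URL is a placeholder, only the variable names
// come from the code below):
//
//   MOCKFORGE_TUNNEL_SERVER_URL=https://tunnel.example.com \
//   MOCKFORGE_TUNNEL_AUTH_TOKEN=<token> mockforge serve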
let tunnel_handle = if config.deceptive_deploy.enabled && config.deceptive_deploy.auto_tunnel {
use mockforge_tunnel::{TunnelConfig, TunnelManager, TunnelProvider};
use std::sync::Arc;
use tokio::time::{sleep, Duration};
let local_url = format!("http://localhost:{}", http_port);
let deploy_config = config.deceptive_deploy.clone();
let tunnel_shutdown = shutdown_token.clone();
Some(tokio::spawn(async move {
sleep(Duration::from_secs(2)).await;
let provider = TunnelProvider::SelfHosted;
let mut tunnel_config = TunnelConfig::new(&local_url).with_provider(provider);
if let Some(domain) = deploy_config.custom_domain {
tunnel_config.custom_domain = Some(domain);
}
if let Ok(server_url) = std::env::var("MOCKFORGE_TUNNEL_SERVER_URL") {
tunnel_config.server_url = Some(server_url);
}
if let Ok(auth_token) = std::env::var("MOCKFORGE_TUNNEL_AUTH_TOKEN") {
tunnel_config.auth_token = Some(auth_token);
}
match TunnelManager::new(&tunnel_config) {
Ok(manager) => {
println!("🌐 Starting tunnel for deceptive deploy...");
match manager.create_tunnel(&tunnel_config).await {
Ok(status) => {
println!("✅ Tunnel created successfully!");
println!(" Public URL: {}", status.public_url);
println!(" Tunnel ID: {}", status.tunnel_id);
println!(
"💡 Your mock API is now accessible at: {}",
status.public_url
);
let metadata_path = std::path::Path::new(".mockforge/deployment.json");
if metadata_path.exists() {
if let Ok(metadata_content) = std::fs::read_to_string(metadata_path)
{
if let Ok(mut metadata) =
serde_json::from_str::<serde_json::Value>(&metadata_content)
{
metadata["tunnel_url"] =
serde_json::Value::String(status.public_url.clone());
if let Ok(updated_json) =
serde_json::to_string_pretty(&metadata)
{
if let Err(e) =
std::fs::write(metadata_path, updated_json)
{
tracing::warn!("Failed to update deployment metadata with tunnel URL: {}", e);
} else {
tracing::info!("Updated deployment metadata with tunnel URL: {}", status.public_url);
}
}
}
}
}
tokio::select! {
_ = tunnel_shutdown.cancelled() => {
println!("🛑 Stopping tunnel...");
if let Err(e) = manager.stop_tunnel().await {
eprintln!("⚠️ Warning: Failed to stop tunnel: {}", e);
}
Ok::<(), anyhow::Error>(())
}
}
}
Err(e) => {
eprintln!("⚠️ Warning: Failed to create tunnel: {}", e);
eprintln!("💡 You can start a tunnel manually with: mockforge tunnel start --local-url {}", local_url);
Ok(())
}
}
}
Err(e) => {
eprintln!("⚠️ Warning: Failed to initialize tunnel manager: {}", e);
eprintln!("💡 You can start a tunnel manually with: mockforge tunnel start --local-url {}", local_url);
Ok(())
}
}
}))
} else {
None
};
#[cfg(feature = "kafka")]
let _kafka_handle = if config.kafka.enabled {
let kafka_config = config.kafka.clone();
let kafka_shutdown = shutdown_token.clone();
Some(tokio::spawn(async move {
use mockforge_kafka::KafkaMockBroker;
println!("📨 Kafka broker listening on {}:{}", kafka_config.host, kafka_config.port);
match KafkaMockBroker::new(kafka_config.clone()).await {
Ok(broker) => {
tokio::select! {
result = broker.start() => {
result.map_err(|e| format!("Kafka broker error: {:?}", e))
}
_ = kafka_shutdown.cancelled() => {
println!("🛑 Shutting down Kafka broker...");
Ok(())
}
}
}
Err(e) => Err(format!("Failed to initialize Kafka broker: {:?}", e)),
}
}))
} else {
None
};
#[cfg(not(feature = "kafka"))]
let _kafka_handle: Option<tokio::task::JoinHandle<Result<(), String>>> = None;
#[cfg(feature = "amqp")]
let _amqp_handle = if config.amqp.enabled {
let amqp_config = config.amqp.clone();
let amqp_shutdown = shutdown_token.clone();
Some(tokio::spawn(async move {
use mockforge_amqp::{AmqpBroker, AmqpSpecRegistry};
use std::sync::Arc;
println!("🐰 AMQP broker listening on {}:{}", amqp_config.host, amqp_config.port);
let spec_registry = Arc::new(
AmqpSpecRegistry::new(amqp_config.clone())
.await
.map_err(|e| format!("Failed to create AMQP spec registry: {:?}", e))?,
);
if let Some(ref fixtures_dir) = amqp_config.fixtures_dir {
if fixtures_dir.exists() {
println!(" Loading AMQP fixtures from {:?}", fixtures_dir);
}
}
let broker = AmqpBroker::new(amqp_config.clone(), spec_registry);
tokio::select! {
result = broker.start() => {
result.map_err(|e| format!("AMQP broker error: {:?}", e))
}
_ = amqp_shutdown.cancelled() => {
println!("🛑 Shutting down AMQP broker...");
Ok(())
}
}
}))
} else {
None
};
#[cfg(not(feature = "amqp"))]
let _amqp_handle: Option<tokio::task::JoinHandle<Result<(), String>>> = None;
#[cfg(feature = "tcp")]
let _tcp_handle = if config.tcp.enabled {
use mockforge_tcp::{TcpConfig as TcpServerConfig, TcpServer, TcpSpecRegistry};
use std::sync::Arc;
let tcp_config = config.tcp.clone();
let tcp_shutdown = shutdown_token.clone();
let server_config = TcpServerConfig {
port: tcp_config.port,
host: tcp_config.host.clone(),
max_connections: tcp_config.max_connections,
timeout_secs: tcp_config.timeout_secs,
fixtures_dir: tcp_config.fixtures_dir.clone(),
echo_mode: tcp_config.echo_mode,
enable_tls: tcp_config.enable_tls,
tls_cert_path: tcp_config.tls_cert_path.clone(),
tls_key_path: tcp_config.tls_key_path.clone(),
read_buffer_size: 8192,
write_buffer_size: 8192,
delimiter: None,
};
Some(tokio::spawn(async move {
let mut registry = TcpSpecRegistry::new();
if let Some(ref fixtures_dir) = server_config.fixtures_dir {
if fixtures_dir.exists() {
if let Err(e) = registry.load_fixtures(fixtures_dir) {
eprintln!(
"⚠️ Warning: Failed to load TCP fixtures from {:?}: {}",
fixtures_dir, e
);
} else {
println!(" Loaded TCP fixtures from {:?}", fixtures_dir);
}
}
}
let registry_arc = Arc::new(registry);
println!("🔌 TCP server listening on {}:{}", server_config.host, server_config.port);
match TcpServer::new(server_config, registry_arc) {
Ok(server) => {
tokio::select! {
result = server.start() => {
result.map_err(|e| format!("TCP server error: {}", e))
}
_ = tcp_shutdown.cancelled() => {
println!("🛑 Shutting down TCP server...");
Ok(())
}
}
}
Err(e) => Err(format!("Failed to initialize TCP server: {}", e)),
}
}))
} else {
None
};
#[cfg(not(feature = "tcp"))]
let _tcp_handle: Option<tokio::task::JoinHandle<Result<(), String>>> = None;
use mockforge_core::latency::{FaultConfig, LatencyInjector, LatencyProfile};
use tokio::sync::RwLock;
let latency_injector_for_admin = if config.core.latency_enabled {
let latency_profile = config.core.default_latency.clone();
let fault_config = FaultConfig::default();
Some(Arc::new(RwLock::new(LatencyInjector::new(latency_profile, fault_config))))
} else {
None
};
let chaos_api_state_for_admin_clone = chaos_api_state_for_admin.clone();
let latency_injector_for_admin_clone = latency_injector_for_admin.clone();
let mockai_for_admin = mockai.clone();
let continuum_config_for_admin = config.reality_continuum.clone();
let time_travel_manager_for_admin = time_travel_manager.clone();
let admin_handle = if config.admin.enabled {
let admin_port = config.admin.port;
let http_port = config.http.port;
let ws_port = config.websocket.port;
let grpc_port = config.grpc.port;
let prometheus_url = config.admin.prometheus_url.clone();
let admin_shutdown = shutdown_token.clone();
let admin_host = config.admin.host.clone();
let http_host = config.http.host.clone();
let ws_host = config.websocket.host.clone();
let grpc_host = config.grpc.host.clone();
let chaos_state = chaos_api_state_for_admin_clone.clone();
let latency_injector = latency_injector_for_admin_clone.clone();
let mockai_ref = mockai_for_admin.clone();
let continuum_config = continuum_config_for_admin.clone();
let time_travel_manager_clone = time_travel_manager_for_admin.clone();
Some(tokio::spawn(async move {
println!("🎛️ Admin UI listening on http://{}:{}", admin_host, admin_port);
use crate::progress::parse_address;
let addr = match parse_address(&format!("{}:{}", admin_host, admin_port), "admin UI") {
Ok(addr) => addr,
Err(e) => {
return Err(format!(
"Failed to bind Admin UI to {}:{}: {}",
admin_host, admin_port, e.message
))
}
};
let http_addr =
match parse_address(&format!("{}:{}", http_host, http_port), "HTTP server") {
Ok(addr) => Some(addr),
Err(e) => {
return Err(format!(
"Failed to parse HTTP server address {}:{}: {}",
http_host, http_port, e.message
))
}
};
let ws_addr =
match parse_address(&format!("{}:{}", ws_host, ws_port), "WebSocket server") {
Ok(addr) => Some(addr),
Err(e) => {
return Err(format!(
"Failed to parse WebSocket server address {}:{}: {}",
ws_host, ws_port, e.message
))
}
};
let grpc_addr =
match parse_address(&format!("{}:{}", grpc_host, grpc_port), "gRPC server") {
Ok(addr) => Some(addr),
Err(e) => {
return Err(format!(
"Failed to parse gRPC server address {}:{}: {}",
grpc_host, grpc_port, e.message
))
}
};
let continuum_config = Some(continuum_config);
let virtual_clock_for_continuum = Some(time_travel_manager_clone.clock());
tokio::select! {
result = mockforge_ui::start_admin_server(
addr,
http_addr,
ws_addr,
grpc_addr,
None,
true,
prometheus_url,
Some(chaos_state),
latency_injector,
mockai_ref,
continuum_config,
virtual_clock_for_continuum,
) => {
result.map_err(|e| format!("Admin UI server error: {}", e))
}
_ = admin_shutdown.cancelled() => {
Ok(())
}
}
}))
} else {
None
};
let metrics_handle = if config.observability.prometheus.enabled {
let metrics_port = config.observability.prometheus.port;
let metrics_registry = metrics_registry.clone();
let metrics_shutdown = shutdown_token.clone();
Some(tokio::spawn(async move {
println!(
"📊 Prometheus metrics server listening on http://0.0.0.0:{}/metrics",
metrics_port
);
let app = prometheus_router(metrics_registry);
let addr = SocketAddr::from(([0, 0, 0, 0], metrics_port));
let listener = TcpListener::bind(addr)
.await
.map_err(|e| format!("Failed to bind metrics server to {}: {}", addr, e))?;
tokio::select! {
result = serve(listener, app) => {
result.map_err(|e| format!("Metrics server error: {}", e))
}
_ = metrics_shutdown.cancelled() => {
Ok(())
}
}
}))
} else {
None
};
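// Readiness gating: give the spawned servers a brief head start, then flip
// the health manager to ready so readiness/startup probes pass.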
tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
health_manager.set_ready().await;
tracing::info!("Service marked as ready - all servers initialized");
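// The first server task to finish (or Ctrl+C) decides the outcome; every
// other task is then cancelled via the shared shutdown token.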
let result = tokio::select! {
result = http_handle => {
match result {
Ok(Ok(())) => {
println!("📡 HTTP server stopped gracefully");
None
}
Ok(Err(e)) => {
eprintln!("❌ {}", e);
Some(e)
}
Err(e) => {
let error = format!("HTTP server task panicked: {}", e);
eprintln!("❌ {}", error);
Some(error)
}
}
}
result = ws_handle => {
match result {
Ok(Ok(())) => {
println!("🔌 WebSocket server stopped gracefully");
None
}
Ok(Err(e)) => {
eprintln!("❌ {}", e);
Some(e)
}
Err(e) => {
let error = format!("WebSocket server task panicked: {}", e);
eprintln!("❌ {}", error);
Some(error)
}
}
}
result = grpc_handle => {
match result {
Ok(Ok(())) => {
println!("⚡ gRPC server stopped gracefully");
None
}
Ok(Err(e)) => {
eprintln!("❌ {}", e);
Some(e)
}
Err(e) => {
let error = format!("gRPC server task panicked: {}", e);
eprintln!("❌ {}", error);
Some(error)
}
}
}
result = async {
if let Some(handle) = admin_handle {
Some(handle.await)
} else {
std::future::pending::<Option<Result<Result<(), String>, tokio::task::JoinError>>>().await
}
} => {
match result {
Some(Ok(Ok(()))) => {
println!("🎛️ Admin UI stopped gracefully");
None
}
Some(Ok(Err(e))) => {
eprintln!("❌ {}", e);
Some(e)
}
Some(Err(e)) => {
let error = format!("Admin UI task panicked: {}", e);
eprintln!("❌ {}", error);
Some(error)
}
None => None
}
}
result = async {
if let Some(handle) = metrics_handle {
Some(handle.await)
} else {
std::future::pending::<Option<Result<Result<(), String>, tokio::task::JoinError>>>().await
}
} => {
match result {
Some(Ok(Ok(()))) => {
println!("📊 Metrics server stopped gracefully");
None
}
Some(Ok(Err(e))) => {
eprintln!("❌ {}", e);
Some(e)
}
Some(Err(e)) => {
let error = format!("Metrics server task panicked: {}", e);
eprintln!("❌ {}", error);
Some(error)
}
None => None
}
}
_ = tokio::signal::ctrl_c() => {
println!("🛑 Received shutdown signal");
health_manager.trigger_shutdown().await;
None
}
};
println!("👋 Shutting down remaining servers...");
shutdown_token.cancel();
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
if let Some(error) = result {
Err(error.into())
} else {
Ok(())
}
}
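/// Dispatches contract-diff subcommands (Analyze, Compare, GeneratePatch,
/// ApplyPatch). A ContractDiffConfig is only assembled when at least one LLM
/// override was supplied; otherwise library defaults apply.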
async fn handle_contract_diff(
diff_command: ContractDiffCommands,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use contract_diff_commands::{
handle_contract_diff_analyze, handle_contract_diff_apply_patch,
handle_contract_diff_compare, handle_contract_diff_generate_patch,
};
use mockforge_core::ai_contract_diff::ContractDiffConfig;
match diff_command {
ContractDiffCommands::Analyze {
spec,
request_path,
capture_id,
output,
llm_provider,
llm_model,
llm_api_key,
confidence_threshold,
} => {
let config = if llm_provider.is_some()
|| llm_model.is_some()
|| llm_api_key.is_some()
|| confidence_threshold.is_some()
{
let mut cfg = ContractDiffConfig::default();
if let Some(provider) = llm_provider {
cfg.llm_provider = provider;
}
if let Some(model) = llm_model {
cfg.llm_model = model;
}
if let Some(api_key) = llm_api_key {
cfg.api_key = Some(api_key);
}
if let Some(threshold) = confidence_threshold {
cfg.confidence_threshold = threshold;
}
Some(cfg)
} else {
None
};
handle_contract_diff_analyze(spec, request_path, capture_id, output, config).await?;
}
ContractDiffCommands::Compare {
old_spec,
new_spec,
output,
} => {
handle_contract_diff_compare(old_spec, new_spec, output).await?;
}
ContractDiffCommands::GeneratePatch {
spec,
request_path,
capture_id,
output,
llm_provider,
llm_model,
llm_api_key,
} => {
let config = if llm_provider.is_some() || llm_model.is_some() || llm_api_key.is_some() {
let mut cfg = ContractDiffConfig::default();
if let Some(provider) = llm_provider {
cfg.llm_provider = provider;
}
if let Some(model) = llm_model {
cfg.llm_model = model;
}
if let Some(api_key) = llm_api_key {
cfg.api_key = Some(api_key);
}
Some(cfg)
} else {
None
};
handle_contract_diff_generate_patch(spec, request_path, capture_id, output, config)
.await?;
}
ContractDiffCommands::ApplyPatch {
spec,
patch,
output,
} => {
handle_contract_diff_apply_patch(spec, patch, output).await?;
}
}
Ok(())
}
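/// Dispatches data-generation subcommands: built-in templates, JSON Schema
/// files, OpenAPI-derived mock data, and a standalone mock server.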
async fn handle_data(
data_command: DataCommands,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match data_command {
DataCommands::Template {
template,
rows,
format,
output,
rag,
rag_provider,
rag_model,
rag_endpoint,
rag_timeout,
rag_max_retries,
} => {
println!("🎯 Generating {} rows using '{}' template", rows, template);
println!("📄 Output format: {}", format);
if rag {
println!("🧠 RAG mode enabled");
if let Some(provider) = &rag_provider {
println!("🤖 RAG Provider: {}", provider);
}
if let Some(model) = &rag_model {
println!("🧠 RAG Model: {}", model);
}
}
if let Some(output_path) = &output {
println!("💾 Output file: {}", output_path.display());
}
let result = generate_from_template(
&template,
rows,
rag,
rag_provider,
rag_model,
rag_endpoint,
rag_timeout,
rag_max_retries,
)
.await?;
output_result(result, format, output).await?;
}
DataCommands::Schema {
schema,
rows,
format,
output,
} => {
println!("📋 Generating {} rows from schema: {}", rows, schema.display());
println!("📄 Output format: {}", format);
if let Some(output_path) = &output {
println!("💾 Output file: {}", output_path.display());
}
let result = generate_from_json_schema_file(&schema, rows).await?;
output_result(result, format, output).await?;
}
DataCommands::MockOpenapi {
spec,
rows,
format,
output,
realistic,
include_optional,
validate,
array_size,
max_array_size,
} => {
println!("🚀 Generating mock data from OpenAPI spec: {}", spec.display());
println!("📊 Rows per schema: {}", rows);
println!("📄 Output format: {}", format);
if realistic {
println!("🎭 Realistic data generation enabled");
}
if include_optional {
println!("📝 Including optional fields");
}
if validate {
println!("✅ Schema validation enabled");
}
println!("📏 Array size: {} (max: {})", array_size, max_array_size);
if let Some(output_path) = &output {
println!("💾 Output file: {}", output_path.display());
}
let result = generate_mock_data_from_openapi(
&spec,
rows,
realistic,
include_optional,
validate,
array_size,
max_array_size,
)
.await?;
output_mock_data_result(result, format, output).await?;
}
DataCommands::MockServer {
spec,
port,
host,
cors,
log_requests,
delay,
realistic,
include_optional,
validate,
} => {
println!("🌐 Starting mock server based on OpenAPI spec: {}", spec.display());
println!("🔗 Server will run on {}:{}", host, port);
if cors {
println!("🌍 CORS enabled");
}
if log_requests {
println!("📝 Request logging enabled");
}
if !delay.is_empty() {
println!("⏱️ Response delays configured: {:?}", delay);
}
if realistic {
println!("🎭 Realistic data generation enabled");
}
if include_optional {
println!("📝 Including optional fields");
}
if validate {
println!("✅ Schema validation enabled");
}
start_mock_server_from_spec(
&spec,
port,
&host,
cors,
log_requests,
delay,
realistic,
include_optional,
validate,
)
.await?;
}
}
Ok(())
}
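/// Runs the Admin UI standalone, with no mock servers attached; the
/// Prometheus URL falls back to http://localhost:9090 unless PROMETHEUS_URL
/// is set.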
async fn handle_admin(
port: u16,
_config: Option<PathBuf>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
println!("🎛️ Starting MockForge Admin UI...");
let addr = format!("127.0.0.1:{}", port).parse()?;
let prometheus_url =
std::env::var("PROMETHEUS_URL").unwrap_or_else(|_| "http://localhost:9090".to_string());
mockforge_ui::start_admin_server(
addr,
None, None, None, None, true, prometheus_url,
None, None, None, None, None, )
.await?;
println!("✅ Admin UI started successfully!");
println!("🌐 Access at: http://localhost:{}/", port);
tokio::signal::ctrl_c().await?;
println!("👋 Shutting down admin UI...");
Ok(())
}
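/// Quick mock mode: serves an auto-generated CRUD API derived from the
/// top-level keys of a JSON file. For a file with a "users" key, the routes
/// can be exercised like this (host/port are whatever was passed in):
///
/// ```text
/// curl http://localhost:3000/users      # list all
/// curl http://localhost:3000/users/1    # get by ID
/// ```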
async fn handle_quick(
file: PathBuf,
port: u16,
host: String,
admin: bool,
admin_port: u16,
metrics: bool,
metrics_port: u16,
logging: bool,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use mockforge_http::quick_mock::{build_quick_router, QuickMockState};
use std::fs;
println!("\n⚡ MockForge Quick Mock Mode");
println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
println!("📁 Loading data from: {}", file.display());
let json_str = fs::read_to_string(&file)
.map_err(|e| format!("Failed to read file '{}': {}", file.display(), e))?;
let json_data: serde_json::Value = serde_json::from_str(&json_str)
.map_err(|e| format!("Failed to parse JSON from '{}': {}", file.display(), e))?;
println!("✓ JSON loaded successfully");
println!("🔍 Auto-detecting routes from JSON keys...");
let state = QuickMockState::from_json(json_data)
.await
.map_err(|e| format!("Failed to create quick mock state: {}", e))?;
let resource_names = state.resource_names().await;
println!("✓ Detected {} resource(s):", resource_names.len());
for resource in &resource_names {
println!(" • /{}", resource);
}
let app = build_quick_router(state).await;
println!();
println!("🚀 Quick Mock Server Configuration:");
println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
println!(" HTTP Server: http://{}:{}", host, port);
if admin {
println!(" Admin UI: http://{}:{}", host, admin_port);
}
if metrics {
println!(" Metrics: http://{}:{}/__metrics", host, metrics_port);
}
if logging {
println!(" Logging: Enabled");
}
println!();
println!("📚 Available Endpoints:");
println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
for resource in &resource_names {
println!(" GET /{} - List all", resource);
println!(" GET /{}/:id - Get by ID", resource);
println!(" POST /{} - Create new", resource);
println!(" PUT /{}/:id - Update by ID", resource);
println!(" DELETE /{}/:id - Delete by ID", resource);
println!();
}
println!(" GET /__quick/info - API information");
println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
let addr: SocketAddr = format!("{}:{}", host, port).parse()?;
let listener = TcpListener::bind(addr).await?;
println!();
println!("✅ Server started successfully!");
println!("💡 Press Ctrl+C to stop");
println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n");
serve(listener, app)
.with_graceful_shutdown(async {
tokio::signal::ctrl_c().await.unwrap_or_else(|e| {
eprintln!("⚠️ Warning: Failed to install CTRL+C signal handler: {}", e);
eprintln!("💡 Server may not shut down gracefully on SIGINT");
});
})
.await?;
println!("\n👋 Server stopped\n");
Ok(())
}
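/// Runs the workspace sync daemon until Ctrl+C: watches the directory for
/// .yaml/.yml changes and syncs them bidirectionally with the workspace.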
async fn handle_sync(
workspace_dir: PathBuf,
_config: Option<PathBuf>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
println!("\n🔄 Starting MockForge Sync Daemon...");
println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
println!("📁 Workspace directory: {}", workspace_dir.display());
println!();
println!("ℹ️ What the sync daemon does:");
println!(" • Monitors the workspace directory for .yaml/.yml file changes");
println!(" • Automatically imports new or modified request files");
println!(" • Syncs changes bidirectionally between files and workspace");
println!(" • Skips hidden files (starting with .)");
println!();
println!("🔍 Monitoring for file changes...");
println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
println!();
let sync_service = mockforge_core::SyncService::new(&workspace_dir);
sync_service.start().await?;
println!("✅ Sync daemon started successfully!");
println!("💡 Press Ctrl+C to stop\n");
tokio::signal::ctrl_c().await?;
println!("\n🛑 Received shutdown signal");
sync_service.stop().await?;
println!("👋 Sync daemon stopped\n");
Ok(())
}
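/// Resolves RAG settings with the precedence: explicit CLI override, then
/// MOCKFORGE_RAG_* / MOCKFORGE_EMBEDDING_* environment variables, then
/// provider-specific defaults (e.g. gpt-3.5-turbo for OpenAI, llama2 for
/// Ollama). Unparseable numeric env values silently fall back to defaults.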
fn load_rag_config(
provider_override: Option<String>,
model_override: Option<String>,
endpoint_override: Option<String>,
timeout_override: Option<u64>,
max_retries_override: Option<usize>,
) -> RagConfig {
let provider = provider_override
.or_else(|| std::env::var("MOCKFORGE_RAG_PROVIDER").ok())
.unwrap_or_else(|| "openai".to_string());
let llm_provider = match provider.to_lowercase().as_str() {
"anthropic" => LlmProvider::Anthropic,
"ollama" => LlmProvider::Ollama,
"openai_compatible" => LlmProvider::OpenAICompatible,
_ => LlmProvider::OpenAI,
};
let embedding_provider = match std::env::var("MOCKFORGE_EMBEDDING_PROVIDER")
.unwrap_or_else(|_| "openai".to_string())
.to_lowercase()
.as_str()
{
"openai_compatible" => EmbeddingProvider::OpenAICompatible,
_ => EmbeddingProvider::OpenAI,
};
RagConfig {
provider: llm_provider.clone(),
api_endpoint: endpoint_override
.or_else(|| std::env::var("MOCKFORGE_RAG_API_ENDPOINT").ok())
.unwrap_or_else(|| match llm_provider {
LlmProvider::OpenAI => "https://api.openai.com/v1/chat/completions".to_string(),
LlmProvider::Anthropic => "https://api.anthropic.com/v1/messages".to_string(),
LlmProvider::Ollama => "http://localhost:11434/api/generate".to_string(),
LlmProvider::OpenAICompatible => {
"http://localhost:8000/v1/chat/completions".to_string()
}
}),
api_key: std::env::var("MOCKFORGE_RAG_API_KEY").ok(),
model: model_override
.or_else(|| std::env::var("MOCKFORGE_RAG_MODEL").ok())
.unwrap_or_else(|| match llm_provider {
LlmProvider::OpenAI => "gpt-3.5-turbo".to_string(),
LlmProvider::Anthropic => "claude-3-sonnet-20240229".to_string(),
LlmProvider::Ollama => "llama2".to_string(),
LlmProvider::OpenAICompatible => "gpt-3.5-turbo".to_string(),
}),
max_tokens: std::env::var("MOCKFORGE_RAG_MAX_TOKENS")
.unwrap_or_else(|_| "1000".to_string())
.parse()
.unwrap_or(1000),
temperature: std::env::var("MOCKFORGE_RAG_TEMPERATURE")
.unwrap_or_else(|_| "0.7".to_string())
.parse()
.unwrap_or(0.7),
context_window: std::env::var("MOCKFORGE_RAG_CONTEXT_WINDOW")
.unwrap_or_else(|_| "4000".to_string())
.parse()
.unwrap_or(4000),
semantic_search_enabled: std::env::var("MOCKFORGE_SEMANTIC_SEARCH")
.unwrap_or_else(|_| "true".to_string())
.parse()
.unwrap_or(true),
embedding_provider,
embedding_model: std::env::var("MOCKFORGE_EMBEDDING_MODEL")
.unwrap_or_else(|_| "text-embedding-ada-002".to_string()),
embedding_endpoint: std::env::var("MOCKFORGE_EMBEDDING_ENDPOINT").ok(),
similarity_threshold: std::env::var("MOCKFORGE_SIMILARITY_THRESHOLD")
.unwrap_or_else(|_| "0.7".to_string())
.parse()
.unwrap_or(0.7),
max_chunks: std::env::var("MOCKFORGE_MAX_CHUNKS")
.unwrap_or_else(|_| "5".to_string())
.parse()
.unwrap_or(5),
request_timeout_seconds: timeout_override
.or_else(|| {
std::env::var("MOCKFORGE_RAG_TIMEOUT_SECONDS").ok().and_then(|s| s.parse().ok())
})
.unwrap_or(30),
max_retries: max_retries_override
.or_else(|| {
std::env::var("MOCKFORGE_RAG_MAX_RETRIES").ok().and_then(|s| s.parse().ok())
})
.unwrap_or(3),
}
}
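/// Generates rows from a named built-in template (user, product, order),
/// optionally routing generation through RAG when enabled.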
#[allow(clippy::too_many_arguments)]
async fn generate_from_template(
template: &str,
rows: usize,
rag_enabled: bool,
rag_provider: Option<String>,
rag_model: Option<String>,
rag_endpoint: Option<String>,
rag_timeout: Option<u64>,
rag_max_retries: Option<usize>,
) -> Result<mockforge_data::GenerationResult, Box<dyn std::error::Error + Send + Sync>> {
use mockforge_data::schema::templates;
let config = mockforge_data::DataConfig {
rows,
rag_enabled,
..Default::default()
};
let schema = match template.to_lowercase().as_str() {
"user" | "users" => templates::user_schema(),
"product" | "products" => templates::product_schema(),
"order" | "orders" => templates::order_schema(),
_ => {
return Err(format!(
"Unknown template: {}. Available templates: user, product, order",
template
)
.into());
}
};
let mut generator = mockforge_data::DataGenerator::new(schema, config)
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;
if rag_enabled {
let rag_config = load_rag_config(
rag_provider.clone(),
rag_model.clone(),
rag_endpoint.clone(),
rag_timeout,
rag_max_retries,
);
generator
.configure_rag(rag_config)
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;
}
generator
.generate()
.await
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)
}
async fn generate_from_json_schema_file(
schema_path: &PathBuf,
rows: usize,
) -> Result<mockforge_data::GenerationResult, Box<dyn std::error::Error + Send + Sync>> {
let schema_content = tokio::fs::read_to_string(schema_path).await?;
let schema_json: serde_json::Value = serde_json::from_str(&schema_content)?;
mockforge_data::generate_from_json_schema(&schema_json, rows)
.await
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)
}
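/// Writes a generation result as json, jsonl, or csv; unknown formats fall
/// back to JSON. Note that the CSV path only serializes string-valued
/// fields: numbers, booleans, and nested values come out as empty cells.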
async fn output_result(
result: mockforge_data::GenerationResult,
format: String,
output_path: Option<PathBuf>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let output_content = match format.to_lowercase().as_str() {
"json" => result.to_json_string()?,
"jsonl" | "jsonlines" => result.to_jsonl_string()?,
"csv" => {
let mut csv_output = String::new();
if let Some(first_row) = result.data.first() {
if let Some(obj) = first_row.as_object() {
let headers: Vec<String> = obj.keys().map(|k| k.to_string()).collect();
csv_output.push_str(&headers.join(","));
csv_output.push('\n');
for row in &result.data {
if let Some(obj) = row.as_object() {
let values: Vec<String> = headers
.iter()
.map(|header| {
obj.get(header)
.and_then(|v| v.as_str())
.unwrap_or("")
.to_string()
})
.collect();
csv_output.push_str(&values.join(","));
csv_output.push('\n');
}
}
}
}
csv_output
}
_ => result.to_json_string()?,
};
if let Some(path) = output_path {
tokio::fs::write(&path, &output_content).await?;
println!("💾 Data written to: {}", path.display());
} else {
println!("{}", output_content);
}
println!("✅ Generated {} rows in {}ms", result.count, result.generation_time_ms);
if !result.warnings.is_empty() {
println!("⚠️ Warnings:");
for warning in result.warnings {
println!(" - {}", warning);
}
}
Ok(())
}
async fn generate_mock_data_from_openapi(
spec_path: &PathBuf,
rows: usize,
realistic: bool,
include_optional: bool,
validate: bool,
array_size: usize,
max_array_size: usize,
) -> Result<mockforge_data::MockDataResult, Box<dyn std::error::Error + Send + Sync>> {
let spec_content = tokio::fs::read_to_string(spec_path).await?;
let spec_json: serde_json::Value = if spec_path.extension().and_then(|s| s.to_str())
== Some("yaml")
|| spec_path.extension().and_then(|s| s.to_str()) == Some("yml")
{
serde_yaml::from_str(&spec_content)?
} else {
serde_json::from_str(&spec_content)?
};
let config = mockforge_data::MockGeneratorConfig::new()
.realistic_mode(realistic)
.include_optional_fields(include_optional)
.validate_generated_data(validate)
.default_array_size(array_size)
.max_array_size(max_array_size);
let mut generator = mockforge_data::MockDataGenerator::with_config(config);
generator
.generate_from_openapi_spec(&spec_json)
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)
}
async fn output_mock_data_result(
result: mockforge_data::MockDataResult,
format: String,
output_path: Option<PathBuf>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let output_content = match format.to_lowercase().as_str() {
"json" => serde_json::to_string_pretty(&result)?,
"jsonl" | "jsonlines" => {
let mut jsonl_output = String::new();
for (schema_name, schema_data) in &result.schemas {
let schema_line = json!({
"type": "schema",
"name": schema_name,
"data": schema_data
});
jsonl_output.push_str(&serde_json::to_string(&schema_line)?);
jsonl_output.push('\n');
}
for (endpoint, response) in &result.responses {
let response_line = json!({
"type": "response",
"endpoint": endpoint,
"status": response.status,
"headers": response.headers,
"body": response.body
});
jsonl_output.push_str(&serde_json::to_string(&response_line)?);
jsonl_output.push('\n');
}
jsonl_output
}
"csv" => {
let mut csv_output = String::new();
csv_output.push_str("type,name,endpoint,status,data\n");
for (schema_name, schema_data) in &result.schemas {
csv_output.push_str(&format!(
"schema,{},\"\",\"\",{}\n",
schema_name,
serde_json::to_string(schema_data)?.replace("\"", "\"\"")
));
}
for (endpoint, response) in &result.responses {
csv_output.push_str(&format!(
"response,\"\",{},{},{}\n",
endpoint.replace("\"", "\"\""),
response.status,
serde_json::to_string(&response.body)?.replace("\"", "\"\"")
));
}
csv_output
}
_ => serde_json::to_string_pretty(&result)?,
};
if let Some(path) = output_path {
tokio::fs::write(&path, &output_content).await?;
println!("💾 Mock data written to: {}", path.display());
} else {
println!("{}", output_content);
}
println!(
"✅ Generated mock data for {} schemas and {} endpoints",
result.schemas.len(),
result.responses.len()
);
if !result.warnings.is_empty() {
println!("⚠️ Warnings:");
for warning in result.warnings {
println!(" - {}", warning);
}
}
Ok(())
}
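/// Boots a standalone mock server from an OpenAPI spec (JSON or YAML,
/// chosen by file extension). Per-endpoint delays are parsed from
/// "endpoint:millis" pairs; entries that do not match are silently skipped.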
async fn start_mock_server_from_spec(
spec_path: &PathBuf,
port: u16,
host: &str,
cors: bool,
log_requests: bool,
delays: Vec<String>,
realistic: bool,
include_optional: bool,
validate: bool,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let spec_content = tokio::fs::read_to_string(spec_path).await?;
let spec_json: serde_json::Value = if spec_path.extension().and_then(|s| s.to_str())
== Some("yaml")
|| spec_path.extension().and_then(|s| s.to_str()) == Some("yml")
{
serde_yaml::from_str(&spec_content)?
} else {
serde_json::from_str(&spec_content)?
};
let mut config = mockforge_data::MockServerConfig::new(spec_json)
.port(port)
.host(host.to_string())
.enable_cors(cors)
.log_requests(log_requests)
.generator_config(
mockforge_data::MockGeneratorConfig::new()
.realistic_mode(realistic)
.include_optional_fields(include_optional)
.validate_generated_data(validate),
);
for delay_spec in delays {
if let Some((endpoint, delay_ms)) = delay_spec.split_once(':') {
if let Ok(delay) = delay_ms.parse::<u64>() {
config = config.response_delay(endpoint.to_string(), delay);
}
}
}
println!("🚀 Starting mock server...");
println!("📡 Server will be available at: http://{}:{}", host, port);
println!("📋 OpenAPI spec: {}", spec_path.display());
println!("🛑 Press Ctrl+C to stop the server");
mockforge_data::start_mock_server_with_config(config)
.await
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)
}
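/// Chaos profile management against a running server's HTTP API: apply,
/// export, import, and list all call {base_url}/api/chaos/profiles
/// endpoints and exit non-zero when the server reports failure.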
async fn handle_chaos_command(
chaos_command: ChaosCommands,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match chaos_command {
ChaosCommands::Profile { profile_command } => match profile_command {
ProfileCommands::Apply { name, base_url } => {
println!("🔧 Applying chaos profile: {}", name);
let client = reqwest::Client::new();
let url = format!("{}/api/chaos/profiles/{}/apply", base_url, name);
let response = client.post(&url).send().await?;
if response.status().is_success() {
println!("✅ Profile '{}' applied successfully", name);
} else {
let error_text = response.text().await.unwrap_or_default();
eprintln!("❌ Failed to apply profile: {}", error_text);
std::process::exit(1);
}
}
ProfileCommands::Export {
name,
format,
output,
base_url,
} => {
println!("📤 Exporting profile: {}", name);
let client = reqwest::Client::new();
let url =
format!("{}/api/chaos/profiles/{}/export?format={}", base_url, name, format);
let response = client.get(&url).send().await?;
if response.status().is_success() {
let content = response.text().await?;
if let Some(output_path) = output {
tokio::fs::write(&output_path, content).await?;
println!("✅ Profile exported to: {}", output_path.display());
} else {
println!("{}", content);
}
} else {
let error_text = response.text().await.unwrap_or_default();
eprintln!("❌ Failed to export profile: {}", error_text);
std::process::exit(1);
}
}
ProfileCommands::Import { file, base_url } => {
println!("📥 Importing profile from: {}", file.display());
let content = tokio::fs::read_to_string(&file).await?;
let format = if file.extension().and_then(|s| s.to_str()) == Some("yaml")
|| file.extension().and_then(|s| s.to_str()) == Some("yml")
{
"yaml"
} else {
"json"
};
let client = reqwest::Client::new();
let url = format!("{}/api/chaos/profiles/import", base_url);
let response = client
.post(&url)
.json(&serde_json::json!({
"content": content,
"format": format
}))
.send()
.await?;
if response.status().is_success() {
println!("✅ Profile imported successfully");
} else {
let error_text = response.text().await.unwrap_or_default();
eprintln!("❌ Failed to import profile: {}", error_text);
std::process::exit(1);
}
}
ProfileCommands::List { base_url } => {
println!("📋 Listing available chaos profiles...");
let client = reqwest::Client::new();
let url = format!("{}/api/chaos/profiles", base_url);
let response = client.get(&url).send().await?;
if response.status().is_success() {
let profiles: Vec<serde_json::Value> = response.json().await?;
println!("\nAvailable Profiles:");
println!("{:-<80}", "");
for profile in profiles {
let name = profile["name"].as_str().unwrap_or("unknown");
let description = profile["description"].as_str().unwrap_or("");
let builtin = profile["builtin"].as_bool().unwrap_or(false);
let tags = profile["tags"]
.as_array()
.map(|arr| {
arr.iter().filter_map(|v| v.as_str()).collect::<Vec<_>>().join(", ")
})
.unwrap_or_default();
println!(
" • {} {}",
name,
if builtin { "(built-in)" } else { "(custom)" }
);
if !description.is_empty() {
println!(" {}", description);
}
if !tags.is_empty() {
println!(" Tags: {}", tags);
}
println!();
}
} else {
let error_text = response.text().await.unwrap_or_default();
eprintln!("❌ Failed to list profiles: {}", error_text);
std::process::exit(1);
}
}
},
}
Ok(())
}
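/// Prints shell completions to stdout. Typical usage (the subcommand name
/// and install path shown are illustrative):
///
/// ```text
/// mockforge completions bash > /etc/bash_completion.d/mockforge
/// ```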
fn handle_completions(shell: Shell) {
let mut cmd = Cli::command();
let bin_name = cmd.get_name().to_string();
generate(shell, &mut cmd, bin_name, &mut std::io::stdout());
}
async fn handle_schema(
output: Option<PathBuf>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use mockforge_schema::generate_config_schema_json;
use std::fs;
let schema_json = generate_config_schema_json();
if let Some(output_path) = output {
fs::write(&output_path, schema_json)?;
println!("✅ JSON Schema generated: {}", output_path.display());
} else {
println!("{}", schema_json);
}
Ok(())
}
async fn handle_generate(
config_path: Option<PathBuf>,
spec_path: Option<PathBuf>,
output_path: Option<PathBuf>,
verbose: bool,
dry_run: bool,
watch: bool,
watch_debounce: u64,
progress: bool,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use mockforge_core::discover_config_file;
use progress::{CliError, ExitCode, LogLevel, ProgressManager};
let mut progress_mgr = ProgressManager::new(verbose);
if watch {
let files_to_watch = if let Some(spec) = &spec_path {
vec![spec.clone()]
} else if let Some(config) = &config_path {
vec![config.clone()]
} else {
match discover_config_file() {
Ok(path) => vec![path],
Err(_) => {
return Err(CliError::new(
"No configuration file found for watch mode".to_string(),
ExitCode::ConfigurationError,
)
.with_suggestion(
"Provide --config or --spec flag, or create mockforge.toml".to_string(),
)
.display_and_exit());
}
}
};
progress_mgr.log(LogLevel::Info, "🔄 Starting watch mode...");
progress_mgr.log(
LogLevel::Info,
&format!("👀 Watching {} file(s) for changes", files_to_watch.len()),
);
if let Err(e) = execute_generation(
&mut progress_mgr,
config_path.clone(),
spec_path.clone(),
output_path.clone(),
verbose,
dry_run,
progress,
)
.await
{
progress_mgr.log(LogLevel::Error, &format!("Initial generation failed: {}", e));
return Err(e);
}
let callback = move || {
let config_path = config_path.clone();
let spec_path = spec_path.clone();
let output_path = output_path.clone();
let verbose = verbose;
let dry_run = dry_run;
let progress = progress;
async move {
let mut progress_mgr = ProgressManager::new(verbose);
execute_generation(
&mut progress_mgr,
config_path,
spec_path,
output_path,
verbose,
dry_run,
progress,
)
.await
}
};
progress::watch::watch_files(files_to_watch, callback, watch_debounce).await?;
return Ok(());
}
execute_generation(
&mut progress_mgr,
config_path,
spec_path,
output_path,
verbose,
dry_run,
progress,
)
.await
}
async fn load_and_validate_config(
path: &PathBuf,
verbose: bool,
progress_mgr: &mut crate::progress::ProgressManager,
) -> mockforge_core::GenerateConfig {
use crate::progress::{utils, LogLevel};
use mockforge_core::load_generate_config_with_fallback;
if verbose {
progress_mgr
.log(LogLevel::Debug, &format!("📄 Loading configuration from: {}", path.display()));
}
if let Err(e) = utils::validate_file_path(path) {
e.display_and_exit();
}
load_generate_config_with_fallback(path).await
}
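/// Core generation pipeline: resolve the configuration, validate the input
/// specification (OpenAPI, GraphQL, or protobuf), then generate the mock
/// server code, a README, and optional barrel files into the output directory.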
async fn execute_generation(
progress_mgr: &mut crate::progress::ProgressManager,
config_path: Option<PathBuf>,
spec_path: Option<PathBuf>,
output_path: Option<PathBuf>,
verbose: bool,
dry_run: bool,
show_progress: bool,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use mockforge_core::{discover_config_file, GenerateConfig};
use progress::{utils, CliError, ExitCode, LogLevel};
use std::time::Instant;
let start_time = Instant::now();
progress_mgr.log(LogLevel::Info, "🔧 Generating mocks from configuration...");
let (_config_file, mut config) = if let Some(path) = &config_path {
let config = load_and_validate_config(path, verbose, progress_mgr).await;
(Some(path.clone()), config)
} else {
match discover_config_file() {
Ok(path) => {
let config = load_and_validate_config(&path, verbose, progress_mgr).await;
(Some(path), config)
}
Err(_) => {
if spec_path.is_none() {
return Err(CliError::new(
"No configuration file found and no spec provided. Please create mockforge.toml, mockforge.yaml, or mockforge.json, or provide --spec flag.".to_string(),
ExitCode::ConfigurationError,
).with_suggestion(
"Create a configuration file or use --spec to specify an OpenAPI specification".to_string()
).display_and_exit());
}
progress_mgr
.log(LogLevel::Warning, "ℹ️ No configuration file found, using defaults");
(None, GenerateConfig::default())
}
}
};
if let Some(spec) = &spec_path {
config.input.spec = Some(spec.clone());
}
if let Some(output) = &output_path {
config.output.path = output.clone();
}
let spec = progress::require_registry(&config.input.spec, "spec")?;
if !spec.exists() {
return Err(CliError::new(
format!("Specification file not found: {}", spec.display()),
ExitCode::FileNotFound,
)
.with_suggestion("Check the file path and ensure the specification file exists".to_string())
.display_and_exit());
}
if verbose {
progress_mgr.log(LogLevel::Debug, "🔍 Validating specification...");
}
let spec_content = match tokio::fs::read_to_string(spec).await {
Ok(content) => content,
Err(e) => CliError::new(
format!("Failed to read specification file: {}", e),
ExitCode::FileNotFound,
)
.display_and_exit(),
};
let format = match mockforge_core::spec_parser::SpecFormat::detect(&spec_content, Some(spec)) {
Ok(fmt) => fmt,
Err(e) => {
return Err(CliError::new(
format!("Failed to detect specification format: {}", e),
ExitCode::ConfigurationError,
)
.with_suggestion(
"Ensure your file is a valid OpenAPI, GraphQL, or protobuf specification"
.to_string(),
)
.display_and_exit());
}
};
if verbose {
progress_mgr
.log(LogLevel::Debug, &format!("📋 Detected format: {}", format.display_name()));
}
match format {
mockforge_core::spec_parser::SpecFormat::OpenApi20
| mockforge_core::spec_parser::SpecFormat::OpenApi30
| mockforge_core::spec_parser::SpecFormat::OpenApi31 => {
let json_value: serde_json::Value =
match serde_json::from_str::<serde_json::Value>(&spec_content) {
Ok(val) => val,
Err(_) => {
match serde_yaml::from_str(&spec_content) {
Ok(val) => val,
Err(e) => CliError::new(
format!("Invalid JSON or YAML in OpenAPI spec: {}", e),
ExitCode::ConfigurationError,
)
.display_and_exit(),
}
}
};
let validation =
mockforge_core::spec_parser::OpenApiValidator::validate(&json_value, format);
if !validation.is_valid {
let error_details: Vec<String> = validation
.errors
.iter()
.map(|e| {
let mut msg = e.message.clone();
if let Some(path) = &e.path {
msg.push_str(&format!(" (at {})", path));
}
if let Some(suggestion) = &e.suggestion {
msg.push_str(&format!(". Hint: {}", suggestion));
}
msg
})
.collect();
let error_msg = error_details.join("\n ");
return Err(CliError::new(
format!("Invalid OpenAPI specification:\n {}", error_msg),
ExitCode::ConfigurationError,
)
.with_suggestion("Fix the validation errors above and try again".to_string())
.display_and_exit());
}
if !validation.warnings.is_empty() && verbose {
progress_mgr.log(LogLevel::Warning, "⚠️ Validation warnings:");
for warning in &validation.warnings {
progress_mgr.log(LogLevel::Warning, &format!(" - {}", warning));
}
}
if verbose {
progress_mgr.log(LogLevel::Success, "✅ OpenAPI specification is valid");
}
}
mockforge_core::spec_parser::SpecFormat::GraphQL => {
let validation = mockforge_core::spec_parser::GraphQLValidator::validate(&spec_content);
if !validation.is_valid {
let error_details: Vec<String> = validation
.errors
.iter()
.map(|e| {
let mut msg = e.message.clone();
if let Some(suggestion) = &e.suggestion {
msg.push_str(&format!(". Hint: {}", suggestion));
}
msg
})
.collect();
let error_msg = error_details.join("\n ");
return Err(CliError::new(
format!("Invalid GraphQL schema:\n {}", error_msg),
ExitCode::ConfigurationError,
)
.with_suggestion("Fix the validation errors above and try again".to_string())
.display_and_exit());
}
if !validation.warnings.is_empty() && verbose {
progress_mgr.log(LogLevel::Warning, "⚠️ Validation warnings:");
for warning in &validation.warnings {
progress_mgr.log(LogLevel::Warning, &format!(" - {}", warning));
}
}
if verbose {
progress_mgr.log(LogLevel::Success, "✅ GraphQL schema is valid");
}
}
mockforge_core::spec_parser::SpecFormat::Protobuf => {
if verbose {
progress_mgr
.log(LogLevel::Info, "📋 Protobuf validation will be performed during parsing");
}
}
}
if let Err(e) = utils::validate_output_dir(&config.output.path) {
e.display_and_exit();
}
if verbose {
progress_mgr.log(LogLevel::Debug, &format!("📝 Input spec: {}", spec.display()));
progress_mgr
.log(LogLevel::Debug, &format!("📂 Output path: {}", config.output.path.display()));
if let Some(filename) = &config.output.filename {
progress_mgr.log(LogLevel::Debug, &format!("📄 Output filename: {}", filename));
}
if let Some(options) = &config.options {
progress_mgr.log(LogLevel::Debug, &format!("⚙️ Client: {:?}", options.client));
progress_mgr.log(LogLevel::Debug, &format!("⚙️ Mode: {:?}", options.mode));
progress_mgr.log(LogLevel::Debug, &format!("⚙️ Runtime: {:?}", options.runtime));
}
if !config.plugins.is_empty() {
progress_mgr.log(LogLevel::Debug, "🔌 Plugins:");
for (name, plugin) in &config.plugins {
match plugin {
mockforge_core::PluginConfig::Simple(pkg) => {
progress_mgr.log(LogLevel::Debug, &format!(" - {}: {}", name, pkg));
}
mockforge_core::PluginConfig::Advanced { package, options } => {
progress_mgr.log(
LogLevel::Debug,
&format!(" - {}: {} (with options)", name, package),
);
if !options.is_empty() {
for (k, v) in options {
progress_mgr.log(LogLevel::Debug, &format!(" - {}: {}", k, v));
}
}
}
}
}
}
}
if dry_run {
progress_mgr.log(LogLevel::Success, "✅ Configuration is valid (dry run)");
return Ok(());
}
let total_steps = 5u64;
let progress_bar = if show_progress {
Some(progress_mgr.create_main_progress(total_steps, "Generating mocks"))
} else {
None
};
progress_mgr.log_step(1, total_steps as usize, "Preparing output directory");
if config.output.clean && config.output.path.exists() {
if verbose {
progress_mgr.log(
LogLevel::Debug,
&format!("🧹 Cleaning output directory: {}", config.output.path.display()),
);
}
tokio::fs::remove_dir_all(&config.output.path).await?;
}
tokio::fs::create_dir_all(&config.output.path).await?;
if let Some(ref pb) = progress_bar {
pb.inc(1u64);
}
progress_mgr.log_step(2, total_steps as usize, "Loading OpenAPI specification");
// The spec was already read for validation above; reuse it instead of re-reading.
let spec_size = utils::format_file_size(spec_content.len() as u64);
progress_mgr.log(LogLevel::Info, &format!("📖 Loaded OpenAPI specification ({})", spec_size));
let parsed_spec =
OpenApiSpec::from_string(&spec_content, spec.extension().and_then(|e| e.to_str()))
.map_err(|e| -> Box<dyn std::error::Error + Send + Sync> {
format!("Failed to parse OpenAPI specification: {}", e).into()
})?;
let naming_context = if config.output.file_naming_template.is_some() {
Some(build_file_naming_context(&parsed_spec))
} else {
None
};
if let Some(ref pb) = progress_bar {
pb.inc(1u64);
}
progress_mgr.log_step(3, total_steps as usize, "Generating mock server code");
let base_filename =
config.output.filename.clone().unwrap_or_else(|| "generated_mock".to_string());
let extension = config.output.extension.clone().unwrap_or_else(|| "rs".to_string());
let mut output_file = config.output.path.join(format!("{}.{}", base_filename, extension));
let codegen_config = mockforge_core::codegen::CodegenConfig {
mock_data_strategy: mockforge_core::codegen::MockDataStrategy::ExamplesOrRandom,
port: None,
enable_cors: false,
default_delay_ms: None,
};
let raw_mock_code = mockforge_core::codegen::generate_mock_server_code(
&parsed_spec,
&extension,
&codegen_config,
)
.map_err(|e| -> Box<dyn std::error::Error + Send + Sync> {
format!("Failed to generate mock server code: {}", e).into()
})?;
let mut generated_file = GeneratedFile {
path: output_file
.strip_prefix(&config.output.path)
.unwrap_or(&output_file)
.to_path_buf(),
content: raw_mock_code,
extension: extension.clone(),
exportable: matches!(extension.as_str(), "ts" | "tsx" | "js" | "jsx" | "mjs"),
};
generated_file =
process_generated_file(generated_file, &config.output, Some(spec), naming_context.as_ref());
output_file = config.output.path.join(&generated_file.path);
tokio::fs::write(&output_file, &generated_file.content).await?;
let all_generated_files = vec![generated_file];
if let Some(ref pb) = progress_bar {
pb.inc(1u64);
}
progress_mgr.log_step(4, total_steps as usize, "Generating additional files");
let readme_content = format!(
r#"# Generated Mock Server
This mock server was generated by MockForge from the OpenAPI specification:
- Source: {}
- Generated: {}
## Usage
```bash
# Start the mock server
cargo run
# Or use MockForge CLI
mockforge serve --spec {}
```
## Files Generated
- `{}` - Main mock server implementation
- `README.md` - This file
"#,
spec.display(),
chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC"),
spec.display(),
{
use crate::progress::get_file_name;
get_file_name(&output_file).unwrap_or_else(|e| {
eprintln!("{}", e.message);
if let Some(suggestion) = e.suggestion {
eprintln!("💡 {}", suggestion);
}
std::process::exit(e.exit_code as i32);
})
}
);
let readme_file = config.output.path.join("README.md");
tokio::fs::write(&readme_file, readme_content).await?;
if let Some(ref pb) = progress_bar {
pb.inc(1u64);
}
if config.output.barrel_type != mockforge_core::BarrelType::None {
if verbose {
progress_mgr.log(
LogLevel::Debug,
&format!("📦 Generating barrel files (type: {:?})", config.output.barrel_type),
);
}
match BarrelGenerator::generate_barrel_files(
&config.output.path,
&all_generated_files,
config.output.barrel_type,
) {
Ok(barrel_files) => {
for (barrel_path, barrel_content) in barrel_files {
tokio::fs::write(&barrel_path, barrel_content).await?;
if verbose {
progress_mgr.log(
LogLevel::Debug,
&format!("📄 Generated barrel file: {}", barrel_path.display()),
);
}
}
}
Err(e) => {
progress_mgr
.log(LogLevel::Warning, &format!("⚠️ Failed to generate barrel files: {}", e));
}
}
}
progress_mgr.log_step(5, total_steps as usize, "Finalizing generation");
let duration = start_time.elapsed();
let duration_str = utils::format_duration(duration);
let total_files = all_generated_files.len() + 1;
progress_mgr
.log(LogLevel::Success, &format!("✅ Mock generation completed in {}", duration_str));
progress_mgr.log(
LogLevel::Info,
&format!("📁 Output directory: {}", config.output.path.display()),
);
progress_mgr.log(LogLevel::Info, &format!("📄 Generated files: {} files", total_files));
if let Some(ref pb) = progress_bar {
pb.finish();
}
Ok(())
}
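/// Scaffold a new MockForge project: create the target directory, write a
/// starter `mockforge.yaml`, and (unless `no_examples` is set) add example
/// OpenAPI and fixture files.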
async fn handle_init(
name: String,
no_examples: bool,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use std::fs;
println!("🚀 Initializing MockForge project...");
let project_dir = if name == "." {
std::env::current_dir()?
} else {
PathBuf::from(&name)
};
if !project_dir.exists() {
fs::create_dir_all(&project_dir)?;
println!("📁 Created directory: {}", project_dir.display());
}
let config_path = project_dir.join("mockforge.yaml");
if config_path.exists() {
println!("⚠️ Configuration file already exists: {}", config_path.display());
} else {
let openapi_spec_line = if !no_examples {
" openapi_spec: \"./examples/openapi.json\"\n"
} else {
""
};
let config_content = format!(
r#"# MockForge Configuration
# Full configuration reference: https://docs.mockforge.dev/config
# HTTP Server
http:
port: 3000
host: "0.0.0.0"
cors_enabled: true
request_timeout_secs: 30
request_validation: "enforce"
aggregate_validation_errors: true
validate_responses: false
response_template_expand: false
validation_overrides: {{}}
skip_admin_validation: true
# WebSocket Server
websocket:
port: 3001
host: "0.0.0.0"
connection_timeout_secs: 300
# gRPC Server
grpc:
port: 50051
host: "0.0.0.0"
# Admin UI
admin:
enabled: true
port: 9080
host: "127.0.0.1"
api_enabled: true
auth_required: false
prometheus_url: "http://localhost:9090"
# Core Features
core:
latency_enabled: true
failures_enabled: false
overrides_enabled: true
traffic_shaping_enabled: false
# Observability
observability:
prometheus:
enabled: true
port: 9090
host: "0.0.0.0"
path: "/metrics"
opentelemetry: null
recorder: null
chaos: null
# Data Generation
data:
default_rows: 100
default_format: "json"
locale: "en"
templates: {{}}
rag:
enabled: false
provider: "openai"
# Logging
logging:
level: "info"
json_format: false
max_file_size_mb: 10
max_files: 5
"#,
openapi_spec_line
);
fs::write(&config_path, config_content)?;
println!("✅ Created mockforge.yaml");
}
if !no_examples {
let examples_dir = project_dir.join("examples");
fs::create_dir_all(&examples_dir)?;
println!("📁 Created examples directory");
let openapi_path = examples_dir.join("openapi.json");
let openapi_content = r#"{
"openapi": "3.0.0",
"info": {
"title": "Example API",
"version": "1.0.0"
},
"paths": {
"/health": {
"get": {
"summary": "Health check",
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"status": {
"type": "string"
}
}
}
}
}
}
}
}
},
"/users": {
"get": {
"summary": "List users",
"responses": {
"200": {
"description": "OK",
"content": {
"application/json": {
"schema": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "string"
},
"email": {
"type": "string"
}
}
}
}
}
}
}
}
}
}
}
}"#;
fs::write(&openapi_path, openapi_content)?;
println!("✅ Created examples/openapi.json");
let fixtures_dir = project_dir.join("fixtures");
fs::create_dir_all(&fixtures_dir)?;
let fixture_path = fixtures_dir.join("users.json");
let fixture_content = r#"[
{
"id": 1,
"name": "Alice Johnson",
"email": "alice@example.com"
},
{
"id": 2,
"name": "Bob Smith",
"email": "bob@example.com"
}
]"#;
fs::write(&fixture_path, fixture_content)?;
println!("✅ Created fixtures/users.json");
}
println!("\n🎉 MockForge project initialized successfully!");
println!("\nNext steps:");
println!(" 1. cd {}", if name == "." { "." } else { &name });
println!(" 2. Edit mockforge.yaml to configure your mock servers");
if !no_examples {
println!(" 3. Review examples/openapi.json for API specifications");
println!(" 4. Run: mockforge serve --config mockforge.yaml");
} else {
println!(" 3. Run: mockforge serve --config mockforge.yaml");
}
println!("\nNote: CLI arguments override config file settings");
Ok(())
}
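/// Dispatch `config` subcommands.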
async fn handle_config(
config_command: ConfigCommands,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match config_command {
ConfigCommands::Validate { config } => {
handle_config_validate(config).await?;
}
}
Ok(())
}
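/// Validate a MockForge configuration file: parse it as YAML or JSON, then
/// run semantic checks (port conflicts, auth settings, referenced files)
/// and print a summary with any errors and warnings.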
async fn handle_config_validate(
config_path: Option<PathBuf>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
println!("🔍 Validating MockForge configuration...");
let config_file = if let Some(path) = config_path {
path
} else {
discover_config_file()?
};
println!("📄 Checking configuration file: {}", config_file.display());
if !config_file.exists() {
return Err(format!("Configuration file not found: {}", config_file.display()).into());
}
let config_content = tokio::fs::read_to_string(&config_file).await?;
let is_yaml = config_file
.extension()
.and_then(|s| s.to_str())
.map(|ext| ext == "yaml" || ext == "yml")
.unwrap_or(true);
let config_result = if is_yaml {
serde_yaml::from_str::<mockforge_core::ServerConfig>(&config_content)
.map_err(|e| format_yaml_error(&config_content, e))
} else {
serde_json::from_str::<mockforge_core::ServerConfig>(&config_content)
.map_err(|e| format_json_error(&config_content, e))
};
match config_result {
Ok(config) => {
let mut warnings = Vec::new();
let mut errors = Vec::new();
if config.http.host.is_empty() {
errors.push("HTTP host is empty".to_string());
}
if config.http.port == 0 {
errors.push("HTTP port cannot be 0".to_string());
}
if let Some(ref spec_path) = config.http.openapi_spec {
if !std::path::Path::new(spec_path).exists() {
errors.push(format!("OpenAPI spec file not found: {}", spec_path));
} else {
println!(" ✓ OpenAPI spec: {}", spec_path);
}
} else {
warnings.push(
"No OpenAPI spec configured. HTTP endpoints will need to be defined manually."
.to_string(),
);
}
let valid_modes = ["off", "warn", "enforce"];
if let Some(validation) = &config.http.validation {
if !valid_modes.contains(&validation.mode.as_str()) {
errors.push(format!(
"Invalid request validation mode '{}'. Must be one of: off, warn, enforce",
validation.mode
));
}
}
if let Some(ref auth) = config.http.auth {
if let Some(ref jwt) = auth.jwt {
if let Some(ref secret) = jwt.secret {
if secret.is_empty() {
errors.push(
"HTTP JWT auth is configured but secret is empty".to_string(),
);
}
} else if jwt.rsa_public_key.is_none() && jwt.ecdsa_public_key.is_none() {
errors.push("HTTP JWT auth requires at least one key (secret, rsa_public_key, or ecdsa_public_key)".to_string());
}
}
if let Some(ref basic) = auth.basic_auth {
if basic.credentials.is_empty() {
warnings.push(
"HTTP Basic auth is configured but no credentials are defined"
.to_string(),
);
}
}
}
if config.websocket.port == 0 {
errors.push("WebSocket port cannot be 0".to_string());
}
if config.websocket.port == config.http.port {
errors.push("WebSocket port conflicts with HTTP port".to_string());
}
if config.grpc.port == 0 {
errors.push("gRPC port cannot be 0".to_string());
}
if config.grpc.port == config.http.port || config.grpc.port == config.websocket.port {
errors.push("gRPC port conflicts with HTTP or WebSocket port".to_string());
}
if config.chaining.enabled {
if config.chaining.max_chain_length == 0 {
errors.push("Chaining is enabled but max_chain_length is 0".to_string());
}
if config.chaining.global_timeout_secs == 0 {
warnings.push("Chaining global timeout is 0 (no timeout)".to_string());
}
println!(
" ✓ Request chaining: enabled (max length: {})",
config.chaining.max_chain_length
);
}
if config.admin.enabled {
if config.admin.port == 0 {
errors.push("Admin UI is enabled but port is 0".to_string());
}
if config.admin.port == config.http.port
|| config.admin.port == config.websocket.port
|| config.admin.port == config.grpc.port
{
errors.push("Admin UI port conflicts with another service port".to_string());
}
if config.admin.auth_required
&& (config.admin.username.is_none() || config.admin.password.is_none())
{
errors.push(
"Admin UI auth is required but username/password not configured"
.to_string(),
);
}
} else {
warnings
.push("Admin UI is disabled. Enable with 'admin.enabled: true'.".to_string());
}
if config.observability.prometheus.enabled && config.observability.prometheus.port == 0
{
errors.push("Prometheus metrics enabled but port is 0".to_string());
}
if let Some(ref otel) = config.observability.opentelemetry {
if otel.enabled {
if otel.service_name.is_empty() {
warnings.push("OpenTelemetry service name is empty".to_string());
}
println!(" ✓ OpenTelemetry: enabled (service: {})", otel.service_name);
}
}
if let Some(ref recorder) = config.observability.recorder {
if recorder.enabled {
if recorder.database_path.is_empty() {
errors.push("Recorder is enabled but database path is empty".to_string());
}
println!(" ✓ Recorder: enabled (db: {})", recorder.database_path);
}
}
if !errors.is_empty() {
println!("\n❌ Configuration has errors:");
for error in &errors {
println!(" ✗ {}", error);
}
return Err("Configuration validation failed".into());
}
println!("\n✅ Configuration is valid");
println!("\n📊 Summary:");
println!(" HTTP server: {}:{}", config.http.host, config.http.port);
println!(" WebSocket server: {}:{}", config.websocket.host, config.websocket.port);
println!(" gRPC server: {}:{}", config.grpc.host, config.grpc.port);
if config.admin.enabled {
println!(" Admin UI: http://{}:{}", config.admin.host, config.admin.port);
}
if config.observability.prometheus.enabled {
println!(
" Prometheus metrics: http://{}:{}/metrics",
config.http.host, config.observability.prometheus.port
);
}
if !warnings.is_empty() {
println!("\n⚠️ Warnings:");
for warning in warnings {
println!(" - {}", warning);
}
}
Ok(())
}
Err(error_msg) => {
println!("❌ Configuration validation failed:\n");
println!("{}", error_msg);
Err("Invalid configuration".into())
}
}
}
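/// Render a serde_yaml parse error as a human-friendly message with the
/// offending source lines, a caret marker, and tips for common mistakes.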
fn format_yaml_error(content: &str, error: serde_yaml::Error) -> String {
let mut message = String::from("❌ Configuration parsing error:\n\n");
let error_str = error.to_string();
let field_path = extract_field_path(&error_str);
if let Some(location) = error.location() {
let line = location.line();
let column = location.column();
message.push_str(&format!("📍 Location: line {}, column {}\n\n", line, column));
let lines: Vec<&str> = content.lines().collect();
let start = line.saturating_sub(2);
let end = (line + 1).min(lines.len());
for (idx, line_content) in lines[start..end].iter().enumerate() {
let line_num = start + idx + 1;
if line_num == line {
message.push_str(&format!(" > {} | {}\n", line_num, line_content));
if column > 0 {
message.push_str(&format!(
" {}^\n",
" ".repeat(column + 5 + line_num.to_string().len())
));
}
} else {
message.push_str(&format!(" {} | {}\n", line_num, line_content));
}
}
message.push('\n');
}
if let Some(path) = &field_path {
message.push_str(&format!("🔍 Field path: {}\n", path));
message.push_str(&format!("❌ Error: {}\n\n", error));
} else {
message.push_str(&format!("❌ Error: {}\n\n", error));
}
if error_str.contains("duplicate key") {
message.push_str("💡 Tip: You have a duplicate key in your YAML. Each key must be unique within its section.\n");
} else if error_str.contains("invalid type") {
message.push_str("💡 Tip: Check that your values match the expected types (strings, numbers, booleans, arrays, objects).\n");
if let Some(path) = &field_path {
message.push_str(&format!(" Check the type for field: {}\n", path));
}
} else if error_str.contains("missing field") {
message.push_str("💡 Tip: This field is usually optional and has a default value.\n");
message.push_str(
" Most configuration fields can be omitted - MockForge will use sensible defaults.\n",
);
if let Some(path) = &field_path {
message.push_str(&format!(" \n To fix: Either add the field at path '{}' or remove it entirely (defaults will be used).\n", path));
message.push_str(
" See config.template.yaml for all available options and their defaults.\n",
);
} else {
message.push_str(
" See config.template.yaml for all available options and their defaults.\n",
);
}
} else if error_str.contains("unknown field") {
message.push_str("💡 Tip: You may have a typo in a field name.\n");
if let Some(path) = &field_path {
message.push_str(&format!(" Unknown field at path: {}\n", path));
message.push_str(
" Check the spelling against the documentation or config.template.yaml.\n",
);
} else {
message.push_str(
" Check the spelling against the documentation or config.template.yaml.\n",
);
}
} else if error_str.contains("expected") {
message.push_str("💡 Tip: There's a type mismatch or syntax error.\n");
if let Some(path) = &field_path {
message.push_str(&format!(" Check the value type for field: {}\n", path));
}
}
message.push_str("\n📚 For a complete example configuration, see: config.template.yaml\n");
message.push_str(" Or run: mockforge init .\n");
message
}
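/// Render a serde_json parse error analogously to `format_yaml_error`,
/// with source context and tips for common JSON mistakes.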
fn format_json_error(content: &str, error: serde_json::Error) -> String {
let mut message = String::from("❌ Configuration parsing error:\n\n");
let error_str = error.to_string();
let field_path = extract_field_path(&error_str);
let line = error.line();
let column = error.column();
message.push_str(&format!("📍 Location: line {}, column {}\n\n", line, column));
let lines: Vec<&str> = content.lines().collect();
let start = line.saturating_sub(2);
let end = (line + 1).min(lines.len());
for (idx, line_content) in lines[start..end].iter().enumerate() {
let line_num = start + idx + 1;
if line_num == line {
message.push_str(&format!(" > {} | {}\n", line_num, line_content));
if column > 0 {
message.push_str(&format!(
" {}^\n",
" ".repeat(column + 5 + line_num.to_string().len())
));
}
} else {
message.push_str(&format!(" {} | {}\n", line_num, line_content));
}
}
message.push('\n');
if let Some(path) = &field_path {
message.push_str(&format!("🔍 Field path: {}\n", path));
message.push_str(&format!("❌ Error: {}\n\n", error));
} else {
message.push_str(&format!("❌ Error: {}\n\n", error));
}
if error_str.contains("trailing comma") {
message.push_str(
"💡 Tip: JSON doesn't allow trailing commas. Remove the comma after the last item.\n",
);
} else if error_str.contains("missing field") {
message.push_str("💡 Tip: This field is usually optional and has a default value.\n");
message.push_str(
" Most configuration fields can be omitted - MockForge will use sensible defaults.\n",
);
if let Some(path) = &field_path {
message.push_str(&format!(" \n To fix: Either add the field at path '{}' or remove it entirely (defaults will be used).\n", path));
}
message.push_str(
" See config.template.yaml for all available options and their defaults.\n",
);
} else if error_str.contains("duplicate field") {
message.push_str(
"💡 Tip: You have a duplicate key. Each key must be unique within its object.\n",
);
} else if error_str.contains("expected") {
message
.push_str("💡 Tip: Check for missing or extra brackets, braces, quotes, or commas.\n");
if let Some(path) = &field_path {
message.push_str(&format!(" Or check the value type for field: {}\n", path));
}
} else if error_str.contains("unknown field") {
message.push_str("💡 Tip: You may have a typo in a field name.\n");
if let Some(path) = &field_path {
message.push_str(&format!(" Unknown field at path: {}\n", path));
}
message
.push_str(" Check the spelling against the documentation or config.template.yaml.\n");
}
message.push_str("\n📚 For a complete example configuration, see: config.template.yaml\n");
message.push_str(" Or run: mockforge init .\n");
message
}
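/// Best-effort extraction of a `section.field` path from a serde error
/// message, used to point users at the offending configuration field.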
fn extract_field_path(error_msg: &str) -> Option<String> {
if let Some(start) = error_msg.find("field `") {
let after_field = &error_msg[start + 7..];
if let Some(end) = after_field.find('`') {
let field_name = &after_field[..end];
if let Some(path_context_start) = error_msg.rfind(" at ") {
let path_context = &error_msg[..path_context_start];
for section in ["http", "admin", "websocket", "grpc", "core", "logging"] {
if path_context.contains(section) {
return Some(format!("{}.{}", section, field_name));
}
}
}
return Some(field_name.to_string());
}
}
if let Some(start) = error_msg.find("invalid type") {
if let Some(field_start) = error_msg[..start].rfind("field `") {
let after_field = &error_msg[field_start + 7..];
if let Some(end) = after_field.find('`') {
return Some(after_field[..end].to_string());
}
}
}
None
}
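/// Search the current directory, then up to five ancestor directories, for
/// a `mockforge.yaml`/`.yml` (or dotfile variant) configuration file.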
fn discover_config_file() -> Result<PathBuf, Box<dyn std::error::Error + Send + Sync>> {
let current_dir = std::env::current_dir()?;
let config_names = vec![
"mockforge.yaml",
"mockforge.yml",
".mockforge.yaml",
".mockforge.yml",
];
for name in &config_names {
let path = current_dir.join(name);
if path.exists() {
return Ok(path);
}
}
let mut dir = current_dir.clone();
for _ in 0..5 {
if let Some(parent) = dir.parent() {
for name in &config_names {
let path = parent.join(name);
if path.exists() {
return Ok(path);
}
}
dir = parent.to_path_buf();
} else {
break;
}
}
Err("No configuration file found. Expected one of: mockforge.yaml, mockforge.yml, .mockforge.yaml, .mockforge.yml".into())
}
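/// Run the AI test subcommands: intelligent mock generation, data drift
/// simulation, and AI event stream generation.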
async fn handle_test_ai(
ai_command: AiTestCommands,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match ai_command {
AiTestCommands::IntelligentMock {
prompt,
rag_provider,
rag_model,
output,
} => {
println!("🧠 Testing Intelligent Mock Generation");
println!("📝 Prompt: {}", prompt);
let rag_config = load_rag_config(rag_provider, rag_model, None, None, None);
use mockforge_data::{IntelligentMockConfig, IntelligentMockGenerator, ResponseMode};
let config = IntelligentMockConfig::new(ResponseMode::Intelligent)
.with_prompt(prompt)
.with_rag_config(rag_config);
let mut generator = IntelligentMockGenerator::new(config)?;
println!("🎯 Generating mock data...");
let result = generator.generate().await?;
let output_str = serde_json::to_string_pretty(&result)?;
if let Some(path) = output {
tokio::fs::write(&path, &output_str).await?;
println!("💾 Output written to: {}", path.display());
} else {
println!("\n📄 Generated Mock Data:");
println!("{}", output_str);
}
println!("✅ Intelligent mock generation completed successfully!");
}
AiTestCommands::Drift {
initial_data,
iterations,
output,
} => {
println!("📊 Testing Data Drift Simulation");
println!("📁 Initial data: {}", initial_data.display());
println!("🔄 Iterations: {}", iterations);
let data_content = tokio::fs::read_to_string(&initial_data).await?;
let mut current_data: serde_json::Value = serde_json::from_str(&data_content)?;
use mockforge_data::drift::{DriftRule, DriftStrategy};
use mockforge_data::DataDriftConfig;
let rule = DriftRule::new("value".to_string(), DriftStrategy::Linear).with_rate(1.0);
let drift_config = DataDriftConfig::new().with_rule(rule);
let engine = mockforge_data::DataDriftEngine::new(drift_config)?;
println!("\n🎯 Simulating drift:");
let mut results = vec![current_data.clone()];
for i in 1..=iterations {
current_data = engine.apply_drift(current_data).await?;
results.push(current_data.clone());
println!(" Iteration {}: {:?}", i, current_data);
}
let output_str = serde_json::to_string_pretty(&results)?;
if let Some(path) = output {
tokio::fs::write(&path, &output_str).await?;
println!("\n💾 Output written to: {}", path.display());
} else {
println!("\n📄 Final Drifted Data:");
println!("{}", serde_json::to_string_pretty(¤t_data)?);
}
println!("✅ Data drift simulation completed successfully!");
}
AiTestCommands::EventStream {
narrative,
event_count,
rag_provider,
rag_model,
output,
} => {
println!("🌊 Testing AI Event Stream Generation");
println!("📖 Narrative: {}", narrative);
println!("🔢 Event count: {}", event_count);
let rag_config = load_rag_config(rag_provider, rag_model, None, None, None);
use mockforge_data::{EventStrategy, ReplayAugmentationConfig, ReplayMode};
let config = ReplayAugmentationConfig {
mode: ReplayMode::Generated,
strategy: EventStrategy::CountBased,
narrative: Some(narrative),
event_count: Some(event_count),
rag_config: Some(rag_config),
..Default::default()
};
let mut engine = mockforge_data::ReplayAugmentationEngine::new(config)?;
println!("🎯 Generating event stream...");
let events = engine.generate_stream().await?;
let output_str = serde_json::to_string_pretty(&events)?;
if let Some(path) = output {
tokio::fs::write(&path, &output_str).await?;
println!("💾 Output written to: {}", path.display());
} else {
println!("\n📄 Generated Events:");
for (i, event) in events.iter().enumerate() {
println!("\nEvent {}:", i + 1);
println!(" Type: {}", event.event_type);
println!(" Timestamp: {}", event.timestamp);
println!(" Data: {}", serde_json::to_string_pretty(&event.data)?);
}
}
println!("\n✅ Event stream generation completed successfully!");
println!(" Generated {} events", events.len());
}
}
Ok(())
}
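/// Generate test suites from recorded API interactions in the recorder
/// database, optionally enriching them with LLM-written descriptions.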
#[allow(clippy::too_many_arguments)]
async fn handle_generate_tests(
database: PathBuf,
format: String,
output: Option<PathBuf>,
protocol: Option<String>,
method: Option<String>,
path: Option<String>,
status_code: Option<u16>,
limit: usize,
suite_name: String,
base_url: String,
ai_descriptions: bool,
llm_provider: String,
llm_model: String,
llm_endpoint: Option<String>,
llm_api_key: Option<String>,
validate_body: bool,
validate_status: bool,
validate_headers: bool,
validate_timing: bool,
max_duration_ms: Option<u64>,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
use mockforge_recorder::{
LlmConfig, Protocol, QueryFilter, RecorderDatabase, TestFormat, TestGenerationConfig,
TestGenerator,
};
println!("🧪 Generating tests from recorded API interactions");
println!("📁 Database: {}", database.display());
println!("📝 Format: {}", format);
println!("🎯 Suite name: {}", suite_name);
use crate::progress::{CliError, ExitCode};
let db_path = database.to_str().ok_or_else(|| {
CliError::new(
format!("Invalid database path: {}", database.display()),
ExitCode::FileNotFound,
)
.with_suggestion(
"Ensure the database path contains only valid UTF-8 characters".to_string(),
)
})?;
let db = RecorderDatabase::new(db_path).await?;
println!("✅ Database opened successfully");
let test_format = match format.as_str() {
"rust_reqwest" => TestFormat::RustReqwest,
"http_file" => TestFormat::HttpFile,
"curl" => TestFormat::Curl,
"postman" => TestFormat::Postman,
"k6" => TestFormat::K6,
"python_pytest" => TestFormat::PythonPytest,
"javascript_jest" => TestFormat::JavaScriptJest,
"go_test" => TestFormat::GoTest,
_ => {
eprintln!("❌ Invalid format: {}. Supported formats: rust_reqwest, http_file, curl, postman, k6, python_pytest, javascript_jest, go_test", format);
return Err("Invalid format".into());
}
};
let protocol_filter = protocol.as_ref().and_then(|p| match p.to_lowercase().as_str() {
"http" => Some(Protocol::Http),
"grpc" => Some(Protocol::Grpc),
"websocket" => Some(Protocol::WebSocket),
"graphql" => Some(Protocol::GraphQL),
_ => None,
});
let llm_config = if ai_descriptions {
let endpoint = llm_endpoint.unwrap_or_else(|| {
if llm_provider == "ollama" {
"http://localhost:11434/api/generate".to_string()
} else {
"https://api.openai.com/v1/chat/completions".to_string()
}
});
Some(LlmConfig {
provider: llm_provider.clone(),
api_endpoint: endpoint,
api_key: llm_api_key,
model: llm_model.clone(),
temperature: 0.3,
})
} else {
None
};
let config = TestGenerationConfig {
format: test_format,
include_assertions: true,
validate_body,
validate_status,
validate_headers,
validate_timing,
max_duration_ms,
suite_name: suite_name.clone(),
base_url: Some(base_url.clone()),
ai_descriptions,
llm_config,
group_by_endpoint: true,
include_setup_teardown: true,
generate_fixtures: ai_descriptions,
suggest_edge_cases: ai_descriptions,
analyze_test_gaps: ai_descriptions,
deduplicate_tests: true,
optimize_test_order: false,
};
let filter = QueryFilter {
protocol: protocol_filter,
method: method.clone(),
path: path.clone(),
status_code: status_code.map(|c| c as i32),
trace_id: None,
min_duration_ms: None,
max_duration_ms: None,
tags: None,
limit: Some(limit as i32),
offset: Some(0),
};
println!("🔍 Searching for recordings...");
if let Some(p) = &protocol {
println!(" Protocol: {}", p);
}
if let Some(m) = &method {
println!(" Method: {}", m);
}
if let Some(p) = &path {
println!(" Path: {}", p);
}
if let Some(s) = status_code {
println!(" Status code: {}", s);
}
println!(" Limit: {}", limit);
let generator = TestGenerator::new(db, config);
println!("\n🎨 Generating tests...");
if ai_descriptions {
println!("🤖 Using {} ({}) for AI descriptions", llm_provider, llm_model);
}
let result = generator.generate_from_filter(filter).await?;
println!("\n✅ Test generation completed successfully!");
println!(" Generated {} tests", result.metadata.test_count);
println!(" Covering {} endpoints", result.metadata.endpoint_count);
println!(" Protocols: {:?}", result.metadata.protocols);
if let Some(output_path) = output {
tokio::fs::write(&output_path, &result.test_file).await?;
println!("\n💾 Tests written to: {}", output_path.display());
} else {
println!("\n📄 Generated Test File:");
println!("{}", "=".repeat(60));
println!("{}", result.test_file);
println!("{}", "=".repeat(60));
}
println!("\n📊 Test Summary:");
for (i, test) in result.tests.iter().enumerate() {
println!(" {}. {} - {} {}", i + 1, test.name, test.method, test.endpoint);
if ai_descriptions
&& !test.description.is_empty()
&& test.description != format!("Test {} {}", test.method, test.endpoint)
{
println!(" Description: {}", test.description);
}
}
println!("\n🎉 Done! You can now run the generated tests.");
Ok(())
}
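/// Manage chaos orchestrations via the running server's HTTP API: import
/// an orchestration, check status, stop, validate a definition file, or
/// write out a starter template.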
async fn handle_orchestrate(
command: OrchestrateCommands,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match command {
OrchestrateCommands::Start { file, base_url } => {
println!("🚀 Starting chaos orchestration from: {}", file.display());
let content = std::fs::read_to_string(&file)?;
let format = if file.extension().and_then(|s| s.to_str()) == Some("json") {
"json"
} else {
"yaml"
};
let client = reqwest::Client::new();
let url = format!("{}/api/chaos/orchestration/import", base_url);
let response = client
.post(&url)
.json(&serde_json::json!({
"content": content,
"format": format
}))
.send()
.await?;
if response.status().is_success() {
let result: serde_json::Value = response.json().await?;
println!("✅ {}", result["message"].as_str().unwrap_or("Orchestration imported"));
let _start_url = format!("{}/api/chaos/orchestration/start", base_url);
println!(" Use the API to start the orchestration");
} else {
eprintln!("❌ Failed to import orchestration: {}", response.status());
}
}
OrchestrateCommands::Status { base_url } => {
println!("📊 Checking orchestration status...");
let client = reqwest::Client::new();
let url = format!("{}/api/chaos/orchestration/status", base_url);
let response = client.get(&url).send().await?;
if response.status().is_success() {
let status: serde_json::Value = response.json().await?;
if status["is_running"].as_bool().unwrap_or(false) {
println!("✅ Orchestration is running");
println!(" Name: {}", status["name"].as_str().unwrap_or("Unknown"));
println!(
" Progress: {:.1}%",
status["progress"].as_f64().unwrap_or(0.0) * 100.0
);
} else {
println!("⏸️ No orchestration currently running");
}
} else {
eprintln!("❌ Failed to get status: {}", response.status());
}
}
OrchestrateCommands::Stop { base_url } => {
println!("🛑 Stopping orchestration...");
let client = reqwest::Client::new();
let url = format!("{}/api/chaos/orchestration/stop", base_url);
let response = client.post(&url).send().await?;
if response.status().is_success() {
let result: serde_json::Value = response.json().await?;
println!("✅ {}", result["message"].as_str().unwrap_or("Orchestration stopped"));
} else {
eprintln!("❌ Failed to stop orchestration: {}", response.status());
}
}
OrchestrateCommands::Validate { file } => {
println!("🔍 Validating orchestration file: {}", file.display());
if !file.exists() {
eprintln!("❌ File not found: {}", file.display());
return Err("File not found".into());
}
let content = std::fs::read_to_string(&file)?;
let is_json = file.extension().and_then(|s| s.to_str()) == Some("json");
let parse_result: Result<serde_json::Value, String> = if is_json {
serde_json::from_str::<serde_json::Value>(&content)
.map_err(|e| format_json_error(&content, e))
} else {
serde_yaml::from_str::<serde_yaml::Value>(&content)
.map_err(|e| format_yaml_error(&content, e))
.and_then(|yaml_val| {
serde_json::to_value(yaml_val)
.map_err(|e| format!("Failed to convert YAML to JSON: {}", e))
})
};
match parse_result {
Ok(value) => {
let mut errors = Vec::new();
let mut warnings = Vec::new();
if value.get("name").is_none() {
errors.push("Missing required field 'name'".to_string());
} else if !value["name"].is_string() {
errors.push("Field 'name' must be a string".to_string());
}
match value.get("steps") {
None => {
errors.push("Missing required field 'steps'".to_string());
}
Some(steps) => {
if let Some(steps_arr) = steps.as_array() {
if steps_arr.is_empty() {
warnings.push(
"Steps array is empty - orchestration won't do anything"
.to_string(),
);
}
for (idx, step) in steps_arr.iter().enumerate() {
let step_num = idx + 1;
if !step.is_object() {
errors.push(format!("Step #{} is not an object", step_num));
continue;
}
if step.get("name").is_none() {
errors.push(format!(
"Step #{} is missing 'name' field",
step_num
));
}
match step.get("scenario") {
None => {
errors.push(format!(
"Step #{} is missing 'scenario' field",
step_num
));
}
Some(scenario) => {
if scenario.get("name").is_none() {
errors.push(format!(
"Step #{} scenario is missing 'name' field",
step_num
));
}
if scenario.get("config").is_none() {
errors.push(format!(
"Step #{} scenario is missing 'config' field",
step_num
));
}
}
}
if step.get("duration_seconds").is_none() {
warnings.push(format!("Step #{} is missing 'duration_seconds' - using default", step_num));
} else if !step["duration_seconds"].is_number() {
errors.push(format!(
"Step #{} 'duration_seconds' must be a number",
step_num
));
}
if let Some(delay) = step.get("delay_before_seconds") {
if !delay.is_number() {
errors.push(format!(
"Step #{} 'delay_before_seconds' must be a number",
step_num
));
}
}
}
} else {
errors.push("Field 'steps' must be an array".to_string());
}
}
}
if !errors.is_empty() {
println!("❌ Orchestration file has errors:");
for error in &errors {
println!(" ✗ {}", error);
}
return Err("Validation failed".into());
}
println!("✅ Orchestration file is valid");
if let Some(name) = value.get("name").and_then(|v| v.as_str()) {
println!("\n📊 Summary:");
println!(" Name: {}", name);
if let Some(desc) = value.get("description").and_then(|v| v.as_str()) {
println!(" Description: {}", desc);
}
if let Some(steps) = value.get("steps").and_then(|v| v.as_array()) {
println!(" Steps: {}", steps.len());
}
}
if !warnings.is_empty() {
println!("\n⚠️ Warnings:");
for warning in warnings {
println!(" - {}", warning);
}
}
}
Err(error_msg) => {
println!("❌ Orchestration file validation failed:\n");
println!("{}", error_msg);
return Err("Invalid orchestration file".into());
}
}
}
OrchestrateCommands::Template { output, format } => {
println!("📝 Generating orchestration template...");
let template = if format == "json" {
serde_json::to_string_pretty(&serde_json::json!({
"name": "example_orchestration",
"description": "Example chaos orchestration",
"steps": [
{
"name": "warmup",
"scenario": {
"name": "network_degradation",
"config": {
"enabled": true,
"latency": {
"enabled": true,
"fixed_delay_ms": 100
}
}
},
"duration_seconds": 60,
"delay_before_seconds": 0,
"continue_on_failure": false
},
{
"name": "peak_load",
"scenario": {
"name": "peak_traffic",
"config": {
"enabled": true,
"rate_limit": {
"enabled": true,
"requests_per_second": 100
}
}
},
"duration_seconds": 120,
"delay_before_seconds": 10,
"continue_on_failure": true
}
],
"parallel": false,
"loop_orchestration": false,
"max_iterations": 1,
"tags": ["example", "test"]
}))?
} else {
"name: example_orchestration
description: Example chaos orchestration
steps:
- name: warmup
scenario:
name: network_degradation
config:
enabled: true
latency:
enabled: true
fixed_delay_ms: 100
duration_seconds: 60
delay_before_seconds: 0
continue_on_failure: false
- name: peak_load
scenario:
name: peak_traffic
config:
enabled: true
rate_limit:
enabled: true
requests_per_second: 100
duration_seconds: 120
delay_before_seconds: 10
continue_on_failure: true
parallel: false
loop_orchestration: false
max_iterations: 1
tags:
- example
- test
"
.to_string()
};
std::fs::write(&output, template)?;
println!("✅ Template saved to: {}", output.display());
}
}
Ok(())
}
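/// Suggest API endpoints with an LLM, from either a free-form description
/// or an input file (endpoint JSON, partial OpenAPI spec, or path list),
/// optionally writing the result as OpenAPI and/or MockForge config.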
#[allow(clippy::too_many_arguments)]
async fn handle_suggest(
from: Option<PathBuf>,
from_description: Option<String>,
format: String,
output: Option<PathBuf>,
num_suggestions: usize,
include_examples: bool,
domain: Option<String>,
llm_provider: String,
llm_model: Option<String>,
llm_endpoint: Option<String>,
llm_api_key: Option<String>,
temperature: f64,
print_json: bool,
) -> anyhow::Result<()> {
use mockforge_core::intelligent_behavior::{
config::BehaviorModelConfig, OutputFormat, SpecSuggestionEngine, SuggestionConfig,
SuggestionInput,
};
let output_format = format.parse::<OutputFormat>().map_err(|e| anyhow::anyhow!("{}", e))?;
let default_model = match llm_provider.to_lowercase().as_str() {
"openai" => "gpt-4o-mini",
"anthropic" => "claude-3-5-sonnet-20241022",
"ollama" => "llama3.1",
_ => "gpt-4o-mini",
};
let llm_config = BehaviorModelConfig {
llm_provider: llm_provider.clone(),
model: llm_model.unwrap_or_else(|| default_model.to_string()),
api_endpoint: llm_endpoint,
api_key: llm_api_key,
temperature,
max_tokens: 4000,
..Default::default()
};
let suggestion_config = SuggestionConfig {
llm_config,
output_format,
num_suggestions,
include_examples,
domain_hint: domain,
};
let input = if let Some(description) = from_description {
SuggestionInput::Description { text: description }
} else if let Some(input_path) = from {
let content = tokio::fs::read_to_string(&input_path).await?;
let json_value: serde_json::Value = serde_json::from_str(&content)?;
if let Some(method) = json_value.get("method").and_then(|v| v.as_str()) {
SuggestionInput::Endpoint {
method: method.to_string(),
path: json_value
.get("path")
.and_then(|v| v.as_str())
.ok_or_else(|| anyhow::anyhow!("Missing 'path' field in endpoint input"))?
.to_string(),
request: json_value.get("request").cloned(),
response: json_value.get("response").cloned(),
description: json_value
.get("description")
.and_then(|v| v.as_str())
.map(String::from),
}
} else if json_value.get("openapi").is_some() {
SuggestionInput::PartialSpec { spec: json_value }
} else if let Some(paths_array) = json_value.get("paths").and_then(|v| v.as_array()) {
// A bare `paths` array of strings is a path-list input; an object-style
// `paths` map (OpenAPI-like) is treated as a partial spec below.
let paths = paths_array.iter().filter_map(|v| v.as_str().map(String::from)).collect();
SuggestionInput::Paths { paths }
} else if json_value.get("paths").is_some() {
SuggestionInput::PartialSpec { spec: json_value }
} else {
return Err(anyhow::anyhow!(
"Unable to detect input type. Expected 'method' field for endpoint, \
'openapi' for spec, or 'paths' array"
));
}
} else {
return Err(anyhow::anyhow!(
"Must provide either --from <file> or --from-description <text>"
));
};
println!("🤖 Generating API specification suggestions...");
println!(" Provider: {}", llm_provider);
println!(" Model: {}", suggestion_config.llm_config.model);
println!(" Suggestions: {}", num_suggestions);
if let Some(ref d) = suggestion_config.domain_hint {
println!(" Domain: {}", d);
}
println!();
let engine = SpecSuggestionEngine::new(suggestion_config);
let result = engine.suggest(&input).await?;
if print_json {
println!("{}", serde_json::to_string_pretty(&result)?);
} else {
println!("✅ Generated {} endpoint suggestions", result.metadata.endpoint_count);
if let Some(domain) = &result.metadata.detected_domain {
println!(" Detected domain: {}", domain);
}
println!();
println!("📝 Suggested Endpoints:");
for (i, suggestion) in result.suggestions.iter().enumerate() {
println!("\n{}. {} {}", i + 1, suggestion.method, suggestion.path);
println!(" {}", suggestion.description);
if !suggestion.parameters.is_empty() {
println!(" Parameters:");
for param in &suggestion.parameters {
let req = if param.required {
"required"
} else {
"optional"
};
println!(
" - {} ({}): {} [{}]",
param.name, param.location, param.data_type, req
);
}
}
if !suggestion.reasoning.is_empty() {
println!(" 💡 {}", suggestion.reasoning);
}
}
println!();
if let Some(base_path) = output {
match output_format {
OutputFormat::OpenAPI => {
if let Some(spec) = &result.openapi_spec {
let yaml = serde_yaml::to_string(spec)?;
tokio::fs::write(&base_path, yaml).await?;
println!("✅ OpenAPI spec saved to: {}", base_path.display());
} else {
println!("⚠️ No OpenAPI spec generated");
}
}
OutputFormat::MockForge => {
if let Some(config) = &result.mockforge_config {
let yaml = serde_yaml::to_string(config)?;
tokio::fs::write(&base_path, yaml).await?;
println!("✅ MockForge config saved to: {}", base_path.display());
} else {
println!("⚠️ No MockForge config generated");
}
}
OutputFormat::Both => {
let openapi_path = base_path.with_extension("openapi.yaml");
let mockforge_path = base_path.with_extension("mockforge.yaml");
if let Some(spec) = &result.openapi_spec {
let yaml = serde_yaml::to_string(spec)?;
tokio::fs::write(&openapi_path, yaml).await?;
println!("✅ OpenAPI spec saved to: {}", openapi_path.display());
}
if let Some(config) = &result.mockforge_config {
let yaml = serde_yaml::to_string(config)?;
tokio::fs::write(&mockforge_path, yaml).await?;
println!("✅ MockForge config saved to: {}", mockforge_path.display());
}
}
}
} else {
println!("💡 Tip: Use --output <file> to save the generated specification");
}
}
Ok(())
}