use std::path::PathBuf;
use swarm_engine_core::agent::{DefaultBatchManagerAgent, ManagerId};
use swarm_engine_core::config::PathResolver;
use swarm_engine_core::learn::{ProfileStore, ScenarioProfileId};
use swarm_engine_eval::prelude::EvalRunner;
use swarm_engine_eval::scenario::{EvalScenario, LlmProvider};
use swarm_engine_llm::{create_llm_invoker, OllamaDecider};
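/// Entry point for the `run` subcommand.
///
/// Loads a scenario from TOML, applies an optional variant and any `key=value`
/// overrides, health-checks the LLM server for endpoint-backed providers, then
/// executes the task through an `EvalRunner` and prints a metrics summary.
/// Exits the process with a non-zero status on any unrecoverable error.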
#[allow(clippy::too_many_arguments)]
pub fn cmd_run(
task: String,
scenario_path: PathBuf,
working_dir: Option<PathBuf>,
env_override: Option<String>,
max_ticks_override: Option<u64>,
verbose: bool,
variant: Option<String>,
learning: bool,
skip_dep_graph: bool,
overrides: Vec<String>,
profile_id: Option<String>,
provider_kind: &str,
) {
use swarm_engine_eval::config::DependencyProviderKind;
use swarm_engine_eval::runtime::RuntimeTaskSpec;
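    // With --verbose, install a tracing subscriber. RUST_LOG takes precedence
    // via EnvFilter; otherwise default to info-level logs for the engine crates.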
if verbose {
use tracing_subscriber::EnvFilter;
let filter = EnvFilter::try_from_default_env()
.unwrap_or_else(|_| EnvFilter::new("swarm_engine_llm=info,swarm_engine_core=info"));
tracing_subscriber::fmt()
.with_env_filter(filter)
.with_target(true)
.compact()
.init();
}
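    // Load and parse the scenario TOML, exiting early on I/O or parse errors.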
if !scenario_path.exists() {
eprintln!("Scenario file not found: {}", scenario_path.display());
std::process::exit(1);
}
let content = match std::fs::read_to_string(&scenario_path) {
Ok(c) => c,
Err(e) => {
eprintln!("Failed to read scenario file: {}", e);
std::process::exit(1);
}
};
let base_scenario: EvalScenario = match toml::from_str(&content) {
Ok(s) => s,
Err(e) => {
eprintln!("Failed to parse scenario TOML: {}", e);
std::process::exit(1);
}
};
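    // Resolve the requested variant, if any; on a miss, list what is available.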
let mut scenario = if let Some(ref variant_name) = variant {
match base_scenario.with_variant(variant_name) {
Some(s) => s,
None => {
eprintln!("Variant '{}' not found.", variant_name);
eprintln!("Available variants: {:?}", base_scenario.variant_names());
std::process::exit(1);
}
}
} else {
base_scenario
};
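    // Apply ad-hoc `key=value` overrides on top of the resolved scenario.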
for kv in &overrides {
if let Some((key, value)) = kv.split_once('=') {
apply_override(&mut scenario, key.trim(), value.trim());
} else {
eprintln!(
"Warning: Invalid override format '{}', expected key=value",
kv
);
}
}
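    // Build the runtime task spec and layer CLI flags over the scenario values.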
let mut spec = RuntimeTaskSpec::from(scenario).with_task(&task);
if let Some(ref env_type) = env_override {
spec = spec.with_env_type(env_type);
}
if let Some(max_ticks) = max_ticks_override {
spec = spec.with_max_ticks(max_ticks);
}
if let Some(ref dir) = working_dir {
spec = spec.with_working_dir(dir);
}
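    // Print a banner summarizing the effective configuration before execution.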
println!("=== SwarmEngine Run ===");
println!("Task: {}", task);
println!("Scenario: {}", scenario_path.display());
if let Some(ref v) = variant {
println!("Variant: {}", v);
}
if !overrides.is_empty() {
println!("Overrides: {:?}", overrides);
}
println!("Environment: {}", spec.environment.env_type);
println!("Max ticks: {}", spec.app_config.max_ticks);
println!("Working dir: {}", spec.resolved_working_dir().display());
println!(
"Learning: {}",
if learning { "enabled" } else { "disabled" }
);
if let Some(ref pid) = profile_id {
println!("Profile: {}", pid);
}
if skip_dep_graph {
println!("Dep graph: skipped");
}
println!();
println!("=== Config ===");
let worker_count: usize = spec.agents.workers.iter().map(|w| w.count).sum();
println!("Workers: {}", worker_count);
println!("LLM: {:?} ({})", spec.llm.provider, spec.llm.model);
println!("Actions: {:?}", spec.actions.action_names());
println!();
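    // Convert the spec back into an eval scenario and stand up a Tokio runtime;
    // its handle is cloned into the invoker factories below.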
let scenario = spec.into_eval_scenario();
let rt = tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime");
let handle = rt.handle().clone();
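    // For endpoint-backed providers, probe the server up front so a dead or
    // misconfigured endpoint fails fast with an actionable hint.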
if scenario.llm.provider.requires_endpoint() {
let endpoint = scenario
.llm
.endpoint
.clone()
.or_else(|| scenario.llm.provider.default_endpoint().map(String::from))
.unwrap_or_else(|| "http://localhost:8080".to_string());
println!("Checking LLM server health...");
let health_url = match scenario.llm.provider {
LlmProvider::Ollama => format!("{}/api/tags", endpoint),
LlmProvider::LlamaCppServer => format!("{}/health", endpoint),
_ => format!("{}/health", endpoint),
};
let health_result = rt.block_on(async {
let client = reqwest::Client::new();
client
.get(&health_url)
.timeout(std::time::Duration::from_secs(5))
.send()
.await
});
match health_result {
Ok(resp) if resp.status().is_success() => {
println!(" \x1b[32m✓ LLM server is healthy ({})\x1b[0m", endpoint);
}
Ok(resp) => {
eprintln!(
"\x1b[31m✗ LLM server returned error: {} ({})\x1b[0m",
resp.status(),
health_url
);
eprintln!(" Hint: Start the server with 'swarm-engine llama start -m <model>'");
std::process::exit(1);
}
Err(e) => {
eprintln!("\x1b[31m✗ LLM server is not responding: {}\x1b[0m", e);
eprintln!(" Endpoint: {}", health_url);
eprintln!(" Hint: Start the server with 'swarm-engine llama start -m <model>'");
std::process::exit(1);
}
}
println!();
}
println!("=== Executing Task ===");
let runner = match scenario.llm.provider {
LlmProvider::Ollama => {
let llm_config = scenario
.llm
.to_ollama_config(scenario.batch_processor.max_concurrency);
EvalRunner::new(scenario, handle.clone())
.with_runs(1)
.with_seed(42)
.with_verbose(verbose)
.with_manager_factory(|| Box::new(DefaultBatchManagerAgent::new(ManagerId(0))))
.with_batch_invoker_factory(move || {
let decider = OllamaDecider::new(llm_config.clone());
Box::new(create_llm_invoker(decider, handle.clone()))
})
}
LlmProvider::LlamaCppServer => {
use swarm_engine_llm::{LlamaCppServerConfig, LlamaCppServerDecider};
let endpoint = scenario
.llm
.endpoint
.clone()
.unwrap_or_else(|| "http://localhost:8080".to_string());
let chat_template = scenario.llm.to_chat_template();
let server_config = LlamaCppServerConfig::new(endpoint)
.with_model_name(&scenario.llm.model)
.with_max_tokens(scenario.llm.max_tokens.unwrap_or(256))
.with_temperature(scenario.llm.temperature)
.with_chat_template(chat_template);
let decider = LlamaCppServerDecider::new(server_config)
.expect("Failed to create LlamaCppServerDecider");
EvalRunner::new(scenario, handle.clone())
.with_runs(1)
.with_seed(42)
.with_verbose(verbose)
.with_manager_factory(|| Box::new(DefaultBatchManagerAgent::new(ManagerId(0))))
.with_batch_invoker_factory(move || {
Box::new(create_llm_invoker(decider.clone(), handle.clone()))
})
}
_ => {
            eprintln!(
                "Error: Provider {:?} is not supported by the 'run' command",
                scenario.llm.provider
            );
eprintln!("Use 'ollama' or 'llama-server' instead");
std::process::exit(1);
}
};
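    // Optionally attach a learning store under the user data directory.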
let runner = if learning {
let learning_path = PathResolver::user_data_dir().join("learning");
runner.with_learning_store(&learning_path)
} else {
runner
};
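    // Optionally preload a scenario profile from the on-disk profile store.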
let runner = if let Some(ref pid) = profile_id {
let profiles_dir = PathResolver::user_data_dir().join("profiles");
let store = ProfileStore::new(&profiles_dir);
        let scenario_profile_id = ScenarioProfileId(pid.clone());
        match store.load(&scenario_profile_id) {
Ok(profile) => {
println!("Loading profile: {} (state: {:?})", pid, profile.state);
runner.with_scenario_profile(profile)
}
Err(e) => {
eprintln!(
"\x1b[31mError: Failed to load profile '{}': {}\x1b[0m",
pid, e
);
std::process::exit(1);
}
}
} else {
runner
};
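    // Wire in the dep-graph skip flag and the requested dependency provider
    // (anything other than "learned" falls back to Smart).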
let runner = runner.skip_learned_action_order(skip_dep_graph);
    let provider_kind_enum = match provider_kind {
        "learned" => DependencyProviderKind::Learned,
        _ => DependencyProviderKind::Smart,
    };
let runner = runner.with_dependency_provider_kind(provider_kind_enum);
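    // Execute the run; an engine error aborts with a non-zero exit code.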
let report = match runner.run() {
Ok(r) => r,
Err(e) => {
eprintln!("\x1b[31mExecution failed: {}\x1b[0m", e);
std::process::exit(1);
}
};
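    // Print the result and metrics for the (single) run.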
println!();
println!("=== Result ===");
if let Some(run) = report.runs.first() {
let status = if run.success {
"\x1b[32mSUCCESS\x1b[0m"
} else {
"\x1b[31mFAILED\x1b[0m"
};
println!("Status: {}", status);
println!("Termination: {:?}", run.termination_reason);
println!();
println!("--- Metrics ---");
println!("Ticks: {}", run.metrics.task.total_ticks);
println!(
"Actions: {} / {} (success rate: {:.1}%)",
run.metrics.task.successful_actions,
run.metrics.task.total_actions,
run.metrics.task.success_rate * 100.0
);
println!(
"Duration: {:.2}s",
run.metrics.performance.total_duration_ms / 1000.0
);
println!(
"Throughput: {:.2} actions/sec (effective: {:.2})",
run.metrics.performance.raw_throughput_per_sec,
run.metrics.performance.effective_throughput_per_sec
);
if run.metrics.performance.llm_invocations > 0 {
println!(
"LLM invocations: {} (errors: {}, rate: {:.1}%)",
run.metrics.performance.llm_invocations,
run.metrics.performance.llm_invoke_errors,
run.metrics.performance.llm_error_rate * 100.0
);
}
}
}
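/// Applies a single `key=value` override to the scenario in place.
///
/// Unknown keys and unparseable values produce a warning rather than an error,
/// so a bad override never aborts the run on its own.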
fn apply_override(scenario: &mut EvalScenario, key: &str, value: &str) {
match key {
"llm.model" => {
scenario.llm.model = value.to_string();
}
"llm.endpoint" => {
scenario.llm.endpoint = Some(value.to_string());
}
"llm.temperature" => {
if let Ok(temp) = value.parse::<f32>() {
scenario.llm.temperature = temp;
} else {
eprintln!(
"Warning: Invalid temperature value '{}', expected float",
value
);
}
}
"llm.max_tokens" => {
if let Ok(tokens) = value.parse::<usize>() {
scenario.llm.max_tokens = Some(tokens);
} else {
eprintln!(
"Warning: Invalid max_tokens value '{}', expected integer",
value
);
}
}
"max_ticks" => {
if let Ok(ticks) = value.parse::<u64>() {
scenario.app_config.max_ticks = ticks;
} else {
eprintln!(
"Warning: Invalid max_ticks value '{}', expected integer",
value
);
}
}
"workers" => {
if let Ok(count) = value.parse::<usize>() {
if let Some(first) = scenario.agents.workers.first_mut() {
first.count = count;
}
} else {
eprintln!(
"Warning: Invalid workers value '{}', expected integer",
value
);
}
}
"managers" => {
if let Ok(count) = value.parse::<usize>() {
if let Some(first) = scenario.agents.managers.first_mut() {
first.count = count;
}
} else {
eprintln!(
"Warning: Invalid managers value '{}', expected integer",
value
);
}
}
_ => {
eprintln!("Warning: Unknown override key '{}'. Supported: llm.model, llm.endpoint, llm.temperature, llm.max_tokens, max_ticks, workers, managers", key);
}
}
}