// swarm-engine-ui 0.1.6
//
// CLI and Desktop UI for SwarmEngine
//! SwarmEngine CLI & Desktop UI
//!
//! Command-line interface and desktop application for SwarmEngine.
//!
//! ## Usage
//!
//! ```bash
//! # Show help
//! swarm-engine --help
//!
//! # Launch the GUI
//! swarm-engine --gui
//!
//! # Run an evaluation
//! swarm-engine eval scenarios/troubleshooting.toml -n 5 -v
//!
//! # Initialize
//! swarm-engine init
//! swarm-engine init --project
//!
//! # Show configuration
//! swarm-engine config
//!
//! # Open a directory
//! swarm-engine open scenarios
//! swarm-engine open reports
//! ```

mod cmd;
mod demo_agents;
mod gui;

use std::path::PathBuf;

use clap::{Parser, Subcommand};

use cmd::{
    cmd_config, cmd_eval, cmd_init, cmd_learn, cmd_llama, cmd_lora, cmd_open, cmd_profile, cmd_run,
    LearnAction, LlamaAction, LoraAction, OpenTarget, ProfileAction,
};
use gui::run_gui;

#[derive(Parser)]
#[command(name = "swarm-engine")]
#[command(about = "SwarmEngine - High-throughput Agent Swarm Engine")]
#[command(version)]
struct Cli {
    /// Launch desktop GUI
    #[arg(long)]
    gui: bool,

    #[command(subcommand)]
    command: Option<Commands>,
}

#[derive(Subcommand)]
enum Commands {
    /// Initialize SwarmEngine directories and config
    Init {
        /// Initialize project-local config instead of global
        #[arg(long)]
        project: bool,
    },
    /// Show current configuration
    Config,
    /// Open a directory in file manager
    Open {
        /// Directory to open: scenarios, reports, config, data
        target: OpenTarget,
    },
    /// Run a task directly (without full evaluation)
    Run {
        /// Task description (what you want to accomplish)
        task: String,

        /// Path to scenario TOML file (provides actions, LLM config, etc.)
        #[arg(short, long)]
        scenario: PathBuf,

        /// Working directory (default: current directory)
        #[arg(short = 'd', long)]
        dir: Option<PathBuf>,

        /// Override environment type (e.g., "default", "realworld")
        #[arg(short = 'e', long)]
        env: Option<String>,

        /// Maximum ticks before timeout (default: from scenario)
        #[arg(short = 'n', long)]
        max_ticks: Option<u64>,

        /// Verbose output (show LLM debug events)
        #[arg(short, long)]
        verbose: bool,

        /// Variant name to apply (if scenario has variants)
        #[arg(long)]
        variant: Option<String>,

        /// Disable cross-session learning
        #[arg(long)]
        no_learning: bool,

        /// Skip learned dependency graph (action order) from offline model
        #[arg(long)]
        no_dep_graph: bool,

        /// Override scenario properties (key=value, can be specified multiple times)
        /// Examples: --set llm.model=gpt-4 --set max_ticks=100 --set workers=4
        #[arg(long = "set", value_name = "KEY=VALUE")]
        overrides: Vec<String>,

        /// Use ScenarioProfile for learned parameters (exploration, strategy, action order)
        #[arg(long)]
        profile: Option<String>,

        /// Dependency provider kind: "smart" (default) or "learned"
        #[arg(long, default_value = "smart")]
        provider: String,
    },
    /// Run evaluation scenario
    Eval {
        /// Path to scenario TOML file
        scenario: PathBuf,

        /// Number of runs (default: 1)
        #[arg(short = 'n', long, default_value = "1")]
        runs: usize,

        /// Random seed for reproducibility
        #[arg(short, long, default_value = "42")]
        seed: u64,

        /// Output JSON report to file (single file)
        #[arg(short, long)]
        output: Option<PathBuf>,

        /// Custom output directory for logs (default: ~/.swarm-engine/eval/logs)
        #[arg(long)]
        output_dir: Option<PathBuf>,

        /// Disable automatic log saving
        #[arg(long)]
        no_log: bool,

        /// Verbose output (print tick snapshots)
        #[arg(short, long)]
        verbose: bool,

        /// Variant name to run (if scenario has variants)
        #[arg(long)]
        variant: Option<String>,

        /// List available variants and exit
        #[arg(long)]
        list_variants: bool,

        /// Disable cross-session learning (enabled by default)
        #[arg(long)]
        no_learning: bool,

        /// Dependency provider kind: "smart" (default) or "learned"
        #[arg(long, default_value = "smart")]
        provider: String,
    },
    /// Manage llama-server (start, stop, logs, status)
    Llama {
        #[command(subcommand)]
        action: LlamaAction,
    },
    /// Run offline learning on collected session data
    Learn {
        #[command(subcommand)]
        action: LearnAction,
    },
    /// LoRA fine-tuning tools (setup, train, convert)
    Lora {
        #[command(subcommand)]
        action: LoraAction,
    },
    /// Manage ScenarioProfiles (add, list, bootstrap, delete)
    Profile {
        #[command(subcommand)]
        action: ProfileAction,
    },
}

fn main() {
    let cli = Cli::parse();

    // --gui flag takes precedence
    if cli.gui {
        run_gui();
        return;
    }

    match cli.command {
        Some(Commands::Init { project }) => cmd_init(project),
        Some(Commands::Config) => cmd_config(),
        Some(Commands::Open { target }) => cmd_open(target),
        Some(Commands::Run {
            task,
            scenario,
            dir,
            env,
            max_ticks,
            verbose,
            variant,
            no_learning,
            no_dep_graph,
            overrides,
            profile,
            provider,
        }) => cmd_run(
            task,
            scenario,
            dir,
            env,
            max_ticks,
            verbose,
            variant,
            !no_learning,
            no_dep_graph,
            overrides,
            profile,
            &provider,
        ),
        Some(Commands::Eval {
            scenario,
            runs,
            seed,
            output,
            output_dir,
            no_log,
            verbose,
            variant,
            list_variants,
            no_learning,
            provider,
        }) => cmd_eval(
            scenario,
            runs,
            seed,
            output,
            output_dir,
            no_log,
            verbose,
            variant,
            list_variants,
            !no_learning, // learning enabled by default
            &provider,
        ),
        Some(Commands::Llama { action }) => cmd_llama(action),
        Some(Commands::Learn { action }) => cmd_learn(action),
        Some(Commands::Lora { action }) => cmd_lora(action),
        Some(Commands::Profile { action }) => cmd_profile(action),
        None => {
            // No command and no --gui: show help
            println!("SwarmEngine - High-throughput Agent Swarm Engine\n");
            println!("Usage: swarm-engine [OPTIONS] <COMMAND>\n");
            println!("Commands:");
            println!("  playground  Iterative learning loop (eval → train → reload)");
            println!("  eval        Run evaluation scenario");
            println!("  profile     Manage ScenarioProfiles (add, list, bootstrap, delete)");
            println!("  learn       Run offline learning on collected data");
            println!("  lora        LoRA fine-tuning tools (setup, train, convert)");
            println!("  llama       Manage llama-server (start, stop, logs, status)");
            println!("  init        Initialize SwarmEngine directories and config");
            println!("  config      Show current configuration");
            println!("  open        Open a directory in file manager");
            println!();
            println!("Options:");
            println!("  --gui   Launch desktop GUI");
            println!("  --help  Print help");
            println!();
            println!("Examples:");
            println!("  swarm-engine --gui");
            println!("  swarm-engine playground scenario.toml -n 20 --train-every 5");
            println!("  swarm-engine eval scenarios/troubleshooting.toml -n 5");
            println!("  swarm-engine learn \"Service Troubleshooting\" -n 10");
            println!("  swarm-engine lora setup && swarm-engine lora train");
            println!("  swarm-engine llama start -m model.gguf");
        }
    }
}