harn-cli 0.8.1

CLI for the Harn programming language — run, test, REPL, format, and lint
use clap::{Args, Subcommand};

#[derive(Debug, Args)]
pub(crate) struct ModelsArgs {
    #[command(subcommand)]
    pub command: ModelsCommand,
}

#[derive(Debug, Subcommand)]
pub(crate) enum ModelsCommand {
    /// List models grouped by provider.
    List(ModelsListArgs),
    /// Pull a model via Ollama.
    Install(ModelsInstallArgs),
    /// Recommend a starter model for the current machine and credentials.
    Recommend(ModelRecommendArgs),
    /// Round-trip a small prompt through a model and report timing, tokens, and cost.
    Test(ModelsTestArgs),
}

#[derive(Debug, Args)]
pub(crate) struct ModelsListArgs {
    /// Restrict to a single provider.
    #[arg(long)]
    pub provider: Option<String>,
    /// Emit JSON instead of a human table.
    #[arg(long)]
    pub json: bool,
    /// Only show locally installed (Ollama) models.
    #[arg(long = "installed-only")]
    pub installed_only: bool,
}

#[derive(Debug, Args)]
pub(crate) struct ModelsInstallArgs {
    /// Model id to pull (e.g. `llama3.2`, `qwen2.5:7b`).
    pub model: String,
    /// Skip the size-confirmation prompt.
    #[arg(long)]
    pub yes: bool,
    /// Optional Ollama keep-alive hint (e.g. `5m`, `1h`).
    #[arg(long = "keep-alive", value_name = "VALUE")]
    pub keep_alive: Option<String>,
}

#[derive(Debug, Args)]
pub(crate) struct ModelRecommendArgs {
    /// Emit the recommendation and hardware snapshot as JSON.
    #[arg(long)]
    pub json: bool,
}

#[derive(Debug, Args)]
pub(crate) struct ModelsTestArgs {
    /// Model alias or provider-native model id.
    pub model: String,
    /// Prompt text to send to the model.
    #[arg(long, default_value = "Reply with the word pong.")]
    pub prompt: String,
    /// Provider id to use instead of inferring one from the model selector.
    #[arg(long)]
    pub provider: Option<String>,
    /// Emit a structured JSON result.
    #[arg(long)]
    pub json: bool,
}
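
For context, a minimal sketch of how these argument types could be wired into a top-level clap parser and dispatched is shown below. The `Cli` struct, the `TopCommand` enum, the `harn` binary name, and the placeholder `println!` handlers are assumptions for illustration, not part of this crate.

use clap::Parser;

// Hypothetical top-level parser; the real binary may organise its commands differently.
#[derive(Debug, Parser)]
#[command(name = "harn", version)]
struct Cli {
    #[command(subcommand)]
    command: TopCommand,
}

#[derive(Debug, Subcommand)]
enum TopCommand {
    /// Inspect, install, recommend, and test models.
    Models(ModelsArgs),
}

fn main() {
    let cli = Cli::parse();
    match cli.command {
        TopCommand::Models(models) => match models.command {
            // Placeholder handlers; a real implementation would call into the
            // provider and Ollama plumbing for each subcommand.
            ModelsCommand::List(args) => println!("list provider={:?} json={}", args.provider, args.json),
            ModelsCommand::Install(args) => println!("install {} yes={}", args.model, args.yes),
            ModelsCommand::Recommend(args) => println!("recommend json={}", args.json),
            ModelsCommand::Test(args) => println!("test {} prompt={:?}", args.model, args.prompt),
        },
    }
}

Under these assumptions, an invocation such as `harn models list --json` would route to the `List` arm with `json` set to true.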