1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
use clap::{Args, Subcommand};
// Top-level argument container for the `models` command group.
// It carries no options of its own; all behavior lives in the subcommand.
// NOTE: no struct-level `///` doc is added here on purpose — clap would
// surface it as `--help` about-text, changing runtime output.
#[derive(Debug, Args)]
pub(crate) struct ModelsArgs {
    // Which `models` operation to run (list / install / recommend / test).
    #[command(subcommand)]
    pub command: ModelsCommand,
}
// The set of operations under `models`. Each variant's `///` line doubles as
// the clap help text shown in `models --help`, so that text is kept verbatim.
#[derive(Debug, Subcommand)]
pub(crate) enum ModelsCommand {
    /// List models grouped by provider.
    List(ModelsListArgs),
    /// Pull a model via Ollama.
    Install(ModelsInstallArgs),
    /// Recommend a starter model for the current machine and credentials.
    Recommend(ModelRecommendArgs),
    /// Round-trip a small prompt through a model and report timing, tokens, and cost.
    Test(ModelsTestArgs),
}
// Options for `models list`. All three are optional filters/format switches;
// with none given, every known model is printed as a human-readable table.
#[derive(Debug, Args)]
pub(crate) struct ModelsListArgs {
    /// Restrict to a single provider.
    #[arg(long)]
    pub provider: Option<String>,
    /// Emit JSON instead of a human table.
    #[arg(long)]
    pub json: bool,
    /// Only show locally-installed (Ollama) models.
    // Explicit long name keeps the flag spelled `--installed-only` while the
    // field stays snake_case.
    #[arg(long = "installed-only")]
    pub installed_only: bool,
}
// Options for `models install`: a required positional model id plus two
// optional knobs controlling confirmation and Ollama keep-alive behavior.
#[derive(Debug, Args)]
pub(crate) struct ModelsInstallArgs {
    /// Model id to pull (e.g. `llama3.2`, `qwen2.5:7b`).
    // Positional (no #[arg(long)]), so it is required on the command line.
    pub model: String,
    /// Skip the size-confirmation prompt.
    #[arg(long)]
    pub yes: bool,
    /// Optional Ollama keep-alive hint (e.g. `5m`, `1h`).
    // `value_name` only affects how the placeholder renders in --help.
    #[arg(long = "keep-alive", value_name = "VALUE")]
    pub keep_alive: Option<String>,
}
// Options for `models recommend`: a single output-format switch.
// NOTE(review): the name breaks the sibling `Models…Args` convention
// (`ModelRecommendArgs` vs `ModelsListArgs` etc.); renaming would touch the
// `ModelsCommand` enum too, so it is flagged here rather than changed.
#[derive(Debug, Args)]
pub(crate) struct ModelRecommendArgs {
    /// Emit the recommendation and hardware snapshot as JSON.
    #[arg(long)]
    pub json: bool,
}
// Options for `models test`: round-trip a prompt through one model.
#[derive(Debug, Args)]
pub(crate) struct ModelsTestArgs {
    /// Model alias or provider-native model id.
    // Positional and required.
    pub model: String,
    /// Prompt text to send to the model.
    #[arg(long, default_value = "Reply with the word pong.")]
    pub prompt: String,
    /// Provider id to use instead of inferring one from the model selector.
    #[arg(long)]
    pub provider: Option<String>,
    /// Emit a structured JSON result.
    // Fix: dropped the redundant `default_value_t = false` — clap bool flags
    // already default to `false` (ArgAction::SetTrue), and the sibling `json`
    // flags in ModelsListArgs/ModelRecommendArgs use the plain form.
    #[arg(long)]
    pub json: bool,
}