//! mermaid_cli/cli/args.rs — command-line argument definitions for the `mermaid` binary.
1use clap::{Parser, Subcommand, ValueEnum};
2use std::path::PathBuf;
3
4#[derive(Parser, Debug)]
5#[command(name = "mermaid")]
6#[command(version = "0.1.0")]
7#[command(about = "An open-source, model-agnostic AI pair programmer", long_about = None)]
8pub struct Cli {
9 /// Model to use (e.g., ollama/codellama, openai/gpt-4, anthropic/claude-3)
10 #[arg(short, long)]
11 pub model: Option<String>,
12
13 /// Path to configuration file
14 #[arg(short, long)]
15 pub config: Option<PathBuf>,
16
17 /// Number of GPU layers for Ollama (overrides config)
18 /// Lower values offload more layers to CPU/RAM, enabling larger models
19 #[arg(long)]
20 pub num_gpu: Option<i32>,
21
22 /// Number of CPU threads for Ollama (overrides config)
23 /// Higher values improve CPU inference speed for offloaded layers
24 #[arg(long)]
25 pub num_thread: Option<i32>,
26
27 /// Context window size for Ollama (overrides config)
28 #[arg(long)]
29 pub num_ctx: Option<i32>,
30
31 /// Enable NUMA optimization for Ollama (overrides config)
32 #[arg(long)]
33 pub numa: Option<bool>,
34
35 /// Verbose output
36 #[arg(short, long)]
37 pub verbose: bool,
38
39 /// Project directory (defaults to current directory)
40 #[arg(short, long)]
41 pub path: Option<PathBuf>,
42
43 /// Skip automatic model installation
44 #[arg(long)]
45 pub no_auto_install: bool,
46
47 /// Don't auto-start LiteLLM proxy
48 #[arg(long)]
49 pub no_auto_proxy: bool,
50
51 /// Stop LiteLLM proxy on exit
52 #[arg(long)]
53 pub stop_proxy_on_exit: bool,
54
55 /// Reuse a previous conversation in this directory (shows selection UI)
56 #[arg(long, conflicts_with = "continue")]
57 pub resume: bool,
58
59 /// Continue the last conversation in this directory
60 #[arg(long, name = "continue", conflicts_with = "resume")]
61 pub continue_conversation: bool,
62
63 /// Non-interactive prompt to execute
64 #[arg(short = 'P', long, conflicts_with_all = &["resume", "continue"])]
65 pub prompt: Option<String>,
66
67 /// Output format for non-interactive mode
68 #[arg(long, value_enum, default_value_t = OutputFormat::Text, requires = "prompt")]
69 pub output_format: OutputFormat,
70
71 /// Maximum tokens to generate in response (non-interactive mode)
72 #[arg(long, requires = "prompt")]
73 pub max_tokens: Option<usize>,
74
75 /// Don't execute agent actions automatically (non-interactive mode)
76 #[arg(long, requires = "prompt")]
77 pub no_execute: bool,
78
79 /// Explicitly select backend (ollama or vllm)
80 /// If not specified, auto-detects based on model availability
81 #[arg(long)]
82 pub backend: Option<String>,
83
84 /// List all available models across all backends
85 #[arg(long)]
86 pub list_all_models: bool,
87
88 /// Show available backends
89 #[arg(long)]
90 pub backends: bool,
91
92 #[command(subcommand)]
93 pub command: Option<Commands>,
94}
95
// Subcommands for the `mermaid` binary. Each variant's `///` doc line is
// emitted by clap as that subcommand's help text, so those lines are
// user-visible strings — do not edit them casually.
// When no subcommand is given, `Cli::command` is `None`; the `Chat` doc text
// suggests chat is then the default — confirm in the dispatch code, which is
// not visible in this file.
#[derive(Subcommand, Debug)]
pub enum Commands {
    /// Initialize configuration
    Init,
    /// List available models
    List,
    /// Start a chat session (default)
    Chat,
    /// Show version information
    Version,
    /// Check status of dependencies
    Status,
}
109
// Output formats accepted by `--output-format` (only valid together with
// `--prompt`; see the `requires = "prompt"` constraint on
// `Cli::output_format`, which also defaults to `Text`).
// `ValueEnum` derives the accepted CLI values from the variant names, and
// the `///` doc lines become the per-value help text — both are
// user-visible, so keep them stable.
#[derive(Debug, Clone, ValueEnum)]
pub enum OutputFormat {
    /// Plain text output
    Text,
    /// JSON structured output
    Json,
    /// Markdown formatted output
    Markdown,
}