//! mermaid_cli/app/config.rs — configuration types, defaults, and
//! load/save helpers for the single `config.toml` file.
1use crate::constants::DEFAULT_OLLAMA_PORT;
2use crate::prompts;
3use anyhow::{Context, Result};
4use directories::ProjectDirs;
5use serde::{Deserialize, Serialize};
6use std::path::PathBuf;
7
/// Main configuration structure
///
/// Persisted as `config.toml` (see [`load_config`]/[`save_config`]). Every
/// field carries `#[serde(default)]`, so a partial or empty config file
/// deserializes cleanly with the missing sections filled in from `Default`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// Last used model (persisted between sessions)
    #[serde(default)]
    pub last_used_model: Option<String>,

    /// Default model configuration
    #[serde(default)]
    pub default_model: ModelSettings,

    /// Ollama configuration
    #[serde(default)]
    pub ollama: OllamaConfig,

    /// OpenAI configuration (for direct OpenAI API access)
    #[serde(default)]
    pub openai: OpenAIConfig,

    /// Anthropic configuration (for direct Anthropic API access)
    #[serde(default)]
    pub anthropic: AnthropicConfig,

    /// UI configuration
    #[serde(default)]
    pub ui: UIConfig,

    /// Context loader configuration
    #[serde(default)]
    pub context: ContextConfig,

    /// Operation mode configuration
    #[serde(default)]
    pub mode: ModeConfig,

    /// Behavior configuration (auto-install, etc.)
    #[serde(default)]
    pub behavior: BehaviorConfig,

    /// Non-interactive mode configuration
    #[serde(default)]
    pub non_interactive: NonInteractiveConfig,
}
51
52impl Default for Config {
53    fn default() -> Self {
54        Self {
55            last_used_model: None,
56            default_model: ModelSettings::default(),
57            ollama: OllamaConfig::default(),
58            openai: OpenAIConfig::default(),
59            anthropic: AnthropicConfig::default(),
60            ui: UIConfig::default(),
61            context: ContextConfig::default(),
62            mode: ModeConfig::default(),
63            behavior: BehaviorConfig::default(),
64            non_interactive: NonInteractiveConfig::default(),
65        }
66    }
67}
68
/// Default model settings
///
/// Struct-level `#[serde(default)]` means any key missing from the TOML
/// falls back to the value produced by the `Default` impl below.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct ModelSettings {
    /// Model provider (ollama, openai, anthropic)
    pub provider: String,
    /// Model name
    pub name: String,
    /// Temperature for generation
    pub temperature: f32,
    /// Maximum tokens to generate
    pub max_tokens: usize,
    /// System prompt
    pub system_prompt: Option<String>,
}
84
impl ModelSettings {
    /// Default system prompt that teaches models how to use Mermaid's action blocks
    ///
    /// Delegates to [`crate::prompts::get_system_prompt`] so the default text
    /// lives in one place.
    pub fn default_system_prompt() -> String {
        prompts::get_system_prompt()
    }
}
91
92impl Default for ModelSettings {
93    fn default() -> Self {
94        Self {
95            provider: String::new(),
96            name: String::new(),
97            temperature: 0.7,
98            max_tokens: 4096,
99            system_prompt: Some(Self::default_system_prompt()),
100        }
101    }
102}
103
/// Ollama configuration
///
/// Connection settings plus optional inference tuning knobs; `None` for a
/// tuning field means the value is omitted so Ollama picks its own default.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct OllamaConfig {
    /// Ollama server host
    pub host: String,
    /// Ollama server port
    pub port: u16,
    /// Ollama cloud API key (for :cloud models)
    /// Set this to use Ollama's cloud inference service
    /// Get your key at: https://ollama.com/cloud
    pub cloud_api_key: Option<String>,
    /// Number of GPU layers to offload (None = auto, 0 = CPU only, positive = specific count)
    /// Lower values free up VRAM for larger models at the cost of speed
    pub num_gpu: Option<i32>,
    /// Number of CPU threads for processing offloaded layers
    /// Higher values improve CPU inference speed for large models
    pub num_thread: Option<i32>,
    /// Context window size (number of tokens)
    /// Larger values allow longer conversations but use more memory
    pub num_ctx: Option<i32>,
    /// Enable NUMA optimization for multi-CPU systems
    pub numa: Option<bool>,
}
128
129impl Default for OllamaConfig {
130    fn default() -> Self {
131        Self {
132            host: String::from("localhost"),
133            port: DEFAULT_OLLAMA_PORT,
134            cloud_api_key: None,
135            num_gpu: None,    // Let Ollama auto-detect
136            num_thread: None, // Let Ollama auto-detect
137            num_ctx: None,    // Use model default
138            numa: None,       // Auto-detect
139        }
140    }
141}
142
/// OpenAI configuration
///
/// The API key itself is never stored in the config file — only the name of
/// the environment variable that holds it.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct OpenAIConfig {
    /// Environment variable containing API key
    pub api_key_env: String,
    /// Organization ID (optional)
    pub organization: Option<String>,
}
152
153impl Default for OpenAIConfig {
154    fn default() -> Self {
155        Self {
156            api_key_env: String::from("OPENAI_API_KEY"),
157            organization: None,
158        }
159    }
160}
161
/// Anthropic configuration
///
/// As with OpenAI, only the env-var *name* is persisted, never the key.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct AnthropicConfig {
    /// Environment variable containing API key
    pub api_key_env: String,
}
169
170impl Default for AnthropicConfig {
171    fn default() -> Self {
172        Self {
173            api_key_env: String::from("ANTHROPIC_API_KEY"),
174        }
175    }
176}
177
/// UI configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct UIConfig {
    /// Color theme
    pub theme: String,
    /// Syntax highlighting theme
    pub syntax_theme: String,
    /// Show line numbers in code blocks
    pub show_line_numbers: bool,
    /// Show file sidebar by default
    pub show_sidebar: bool,
}
191
192impl Default for UIConfig {
193    fn default() -> Self {
194        Self {
195            theme: String::from("dark"),
196            syntax_theme: String::from("monokai"),
197            show_line_numbers: true,
198            show_sidebar: true,
199        }
200    }
201}
202
/// Context loader configuration
///
/// Limits and glob patterns controlling which project files are pulled into
/// the model's context.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct ContextConfig {
    /// Maximum file size to load (in bytes)
    pub max_file_size: usize,
    /// Maximum number of files to include
    pub max_files: usize,
    /// Maximum total context size in tokens
    pub max_context_tokens: usize,
    /// Auto-include these file patterns
    pub include_patterns: Vec<String>,
    /// Always exclude these patterns
    pub exclude_patterns: Vec<String>,
}
218
219impl Default for ContextConfig {
220    fn default() -> Self {
221        Self {
222            max_file_size: 1024 * 1024, // 1MB
223            max_files: 100,
224            max_context_tokens: 50000,
225            include_patterns: vec![],
226            exclude_patterns: vec![String::from("*.log"), String::from("*.tmp")],
227        }
228    }
229}
230
/// Operation mode configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct ModeConfig {
    /// Default operation mode (normal, accept_edits, plan_mode, bypass_all)
    pub default_mode: String,
    /// Remember mode between sessions
    pub remember_mode: bool,
    /// Auto-commit in AcceptEdits mode
    pub auto_commit_on_accept: bool,
    /// Require double confirmation for destructive operations in BypassAll mode
    pub require_destructive_confirmation: bool,
}
244
245impl Default for ModeConfig {
246    fn default() -> Self {
247        Self {
248            default_mode: String::from("normal"),
249            remember_mode: false,
250            auto_commit_on_accept: false,
251            require_destructive_confirmation: true,
252        }
253    }
254}
255
/// Behavior configuration (formerly CLI flags)
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct BehaviorConfig {
    /// Automatically install missing Ollama models
    pub auto_install_models: bool,
    /// Preferred backend (ollama only)
    // NOTE(review): the Default impl sets this to "auto", which the doc above
    // doesn't list as a valid value — confirm the accepted set and reconcile.
    pub backend: String,
}
265
266impl Default for BehaviorConfig {
267    fn default() -> Self {
268        Self {
269            auto_install_models: true,
270            backend: String::from("auto"),
271        }
272    }
273}
274
/// Non-interactive mode configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct NonInteractiveConfig {
    /// Output format (text, json, markdown)
    pub output_format: String,
    /// Maximum tokens to generate
    pub max_tokens: usize,
    /// Don't execute agent actions (dry run)
    pub no_execute: bool,
}
286
287impl Default for NonInteractiveConfig {
288    fn default() -> Self {
289        Self {
290            output_format: String::from("text"),
291            max_tokens: 4096,
292            no_execute: false,
293        }
294    }
295}
296
297/// Load configuration from single config file
298/// Priority: config file > defaults (that's it - no merging, no env vars)
299pub fn load_config() -> Result<Config> {
300    let config_path = get_config_path()?;
301
302    if config_path.exists() {
303        let toml_str = std::fs::read_to_string(&config_path)
304            .with_context(|| format!("Failed to read {}", config_path.display()))?;
305        let config: Config = toml::from_str(&toml_str)
306            .with_context(|| format!("Failed to parse {}. Run 'mermaid init' to regenerate.", config_path.display()))?;
307        Ok(config)
308    } else {
309        Ok(Config::default())
310    }
311}
312
313/// Get the path to the single config file
314pub fn get_config_path() -> Result<PathBuf> {
315    Ok(get_config_dir()?.join("config.toml"))
316}
317
318/// Get the configuration directory
319pub fn get_config_dir() -> Result<PathBuf> {
320    if let Some(proj_dirs) = ProjectDirs::from("", "", "mermaid") {
321        let config_dir = proj_dirs.config_dir();
322        std::fs::create_dir_all(config_dir)?;
323        Ok(config_dir.to_path_buf())
324    } else {
325        // Fallback to home directory
326        let home = std::env::var("HOME")
327            .or_else(|_| std::env::var("USERPROFILE"))
328            .context("Could not determine home directory")?;
329        let config_dir = PathBuf::from(home).join(".config").join("mermaid");
330        std::fs::create_dir_all(&config_dir)?;
331        Ok(config_dir)
332    }
333}
334
335/// Save configuration to file
336pub fn save_config(config: &Config, path: Option<PathBuf>) -> Result<()> {
337    let path = if let Some(p) = path {
338        p
339    } else {
340        get_config_dir()?.join("config.toml")
341    };
342
343    let toml_string = toml::to_string_pretty(config)?;
344    std::fs::write(&path, toml_string)
345        .with_context(|| format!("Failed to write config to {}", path.display()))?;
346
347    Ok(())
348}
349
350/// Create a default configuration file if it doesn't exist
351pub fn init_config() -> Result<()> {
352    let config_file = get_config_path()?;
353
354    if config_file.exists() {
355        println!("Configuration already exists at: {}", config_file.display());
356    } else {
357        let default_config = Config::default();
358        save_config(&default_config, Some(config_file.clone()))?;
359        println!("Created configuration at: {}", config_file.display());
360    }
361
362    Ok(())
363}
364
365/// Persist the last used model to config file
366pub fn persist_last_model(model: &str) -> Result<()> {
367    let mut config = load_config().unwrap_or_default();
368    config.last_used_model = Some(model.to_string());
369    save_config(&config, None)
370}