//! Configuration types and persistence for the Mermaid CLI
//! (`mermaid_cli/app/config.rs`): loading, saving, and defaults.
1use crate::constants::{DEFAULT_MAX_TOKENS, DEFAULT_OLLAMA_PORT, DEFAULT_TEMPERATURE};
2use crate::prompts;
3use anyhow::{Context, Result};
4use directories::ProjectDirs;
5use serde::{Deserialize, Serialize};
6use std::path::PathBuf;
7
8/// Main configuration structure
/// Main configuration structure
///
/// Deserialized from `config.toml` (see `load_config`). Every field carries
/// `#[serde(default)]`, so a partial or even empty config file is valid:
/// any missing section falls back to its `Default` value.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Config {
    /// Last used model (persisted between sessions)
    #[serde(default)]
    pub last_used_model: Option<String>,

    /// Default model configuration
    #[serde(default)]
    pub default_model: ModelSettings,

    /// Ollama configuration
    #[serde(default)]
    pub ollama: OllamaConfig,

    /// OpenAI configuration (for direct OpenAI API access)
    #[serde(default)]
    pub openai: OpenAIConfig,

    /// Anthropic configuration (for direct Anthropic API access)
    #[serde(default)]
    pub anthropic: AnthropicConfig,

    /// UI configuration
    #[serde(default)]
    pub ui: UIConfig,

    /// Operation mode configuration
    #[serde(default)]
    pub mode: ModeConfig,

    /// Behavior configuration (auto-install, etc.)
    #[serde(default)]
    pub behavior: BehaviorConfig,

    /// Non-interactive mode configuration
    #[serde(default)]
    pub non_interactive: NonInteractiveConfig,
}
47
48/// Default model settings
/// Default model settings
///
/// The container-level `#[serde(default)]` means any field omitted from the
/// config file takes the value from `Default for ModelSettings`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct ModelSettings {
    /// Model provider (ollama, openai, anthropic)
    // NOTE(review): defaults to an empty string, which presumably means
    // "not configured yet" — confirm how callers resolve this.
    pub provider: String,
    /// Model name
    pub name: String,
    /// Temperature for generation
    pub temperature: f32,
    /// Maximum tokens to generate
    pub max_tokens: usize,
    /// System prompt
    pub system_prompt: Option<String>,
}
63
impl ModelSettings {
    /// Default system prompt that teaches models how to use Mermaid's action blocks
    ///
    /// Delegates to `prompts::get_system_prompt` so the `Default` impl and
    /// any external callers share a single source of truth for the prompt.
    pub fn default_system_prompt() -> String {
        prompts::get_system_prompt()
    }
}
70
impl Default for ModelSettings {
    fn default() -> Self {
        Self {
            // Empty provider/name act as "unset"; presumably filled in at
            // runtime from CLI args or `last_used_model` — TODO confirm.
            provider: String::new(),
            name: String::new(),
            temperature: DEFAULT_TEMPERATURE,
            max_tokens: DEFAULT_MAX_TOKENS,
            // Pre-populate the action-block system prompt so a freshly
            // generated config works out of the box.
            system_prompt: Some(Self::default_system_prompt()),
        }
    }
}
82
83/// Ollama configuration
/// Ollama configuration
///
/// All tuning fields are `Option`s: `None` leaves the decision to Ollama
/// itself (auto-detection or model defaults).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct OllamaConfig {
    /// Ollama server host
    pub host: String,
    /// Ollama server port
    pub port: u16,
    /// Ollama cloud API key (for :cloud models)
    /// Set this to use Ollama's cloud inference service
    /// Get your key at: https://ollama.com/cloud
    pub cloud_api_key: Option<String>,
    /// Number of GPU layers to offload (None = auto, 0 = CPU only, positive = specific count)
    /// Lower values free up VRAM for larger models at the cost of speed
    pub num_gpu: Option<i32>,
    /// Number of CPU threads for processing offloaded layers
    /// Higher values improve CPU inference speed for large models
    pub num_thread: Option<i32>,
    /// Context window size (number of tokens)
    /// Larger values allow longer conversations but use more memory
    pub num_ctx: Option<i32>,
    /// Enable NUMA optimization for multi-CPU systems
    pub numa: Option<bool>,
}
107
108impl Default for OllamaConfig {
109    fn default() -> Self {
110        Self {
111            host: String::from("localhost"),
112            port: DEFAULT_OLLAMA_PORT,
113            cloud_api_key: None,
114            num_gpu: None,    // Let Ollama auto-detect
115            num_thread: None, // Let Ollama auto-detect
116            num_ctx: None,    // Use model default
117            numa: None,       // Auto-detect
118        }
119    }
120}
121
122/// OpenAI configuration
/// OpenAI configuration
///
/// The API key itself is never stored in the config file; only the name of
/// the environment variable that holds it.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct OpenAIConfig {
    /// Environment variable containing API key
    pub api_key_env: String,
    /// Organization ID (optional)
    pub organization: Option<String>,
}
131
132impl Default for OpenAIConfig {
133    fn default() -> Self {
134        Self {
135            api_key_env: String::from("OPENAI_API_KEY"),
136            organization: None,
137        }
138    }
139}
140
141/// Anthropic configuration
/// Anthropic configuration
///
/// As with OpenAI, only the env-var *name* is persisted — never the key.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct AnthropicConfig {
    /// Environment variable containing API key
    pub api_key_env: String,
}
148
149impl Default for AnthropicConfig {
150    fn default() -> Self {
151        Self {
152            api_key_env: String::from("ANTHROPIC_API_KEY"),
153        }
154    }
155}
156
157/// UI configuration
/// UI configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct UIConfig {
    /// Color theme
    // NOTE(review): stored as a free-form string ("dark" by default);
    // valid theme names are validated elsewhere — confirm.
    pub theme: String,
}
164
165impl Default for UIConfig {
166    fn default() -> Self {
167        Self {
168            theme: String::from("dark"),
169        }
170    }
171}
172
173/// Operation mode configuration
/// Operation mode configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct ModeConfig {
    /// Default operation mode (normal, accept_edits, plan_mode, bypass_all)
    pub default_mode: String,
    /// Remember mode between sessions
    pub remember_mode: bool,
    /// Auto-commit in AcceptEdits mode
    pub auto_commit_on_accept: bool,
    /// Require double confirmation for destructive operations in BypassAll mode
    pub require_destructive_confirmation: bool,
}
186
187impl Default for ModeConfig {
188    fn default() -> Self {
189        Self {
190            default_mode: String::from("normal"),
191            remember_mode: false,
192            auto_commit_on_accept: false,
193            require_destructive_confirmation: true,
194        }
195    }
196}
197
198/// Behavior configuration (formerly CLI flags)
/// Behavior configuration (formerly CLI flags)
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct BehaviorConfig {
    /// Automatically install missing Ollama models
    pub auto_install_models: bool,
    /// Preferred backend (ollama only)
    // NOTE(review): defaults to "auto" despite the doc saying "ollama
    // only" — confirm which values the backend selector accepts.
    pub backend: String,
}
207
208impl Default for BehaviorConfig {
209    fn default() -> Self {
210        Self {
211            auto_install_models: true,
212            backend: String::from("auto"),
213        }
214    }
215}
216
217/// Non-interactive mode configuration
/// Non-interactive mode configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct NonInteractiveConfig {
    /// Output format (text, json, markdown)
    pub output_format: String,
    /// Maximum tokens to generate
    pub max_tokens: usize,
    /// Don't execute agent actions (dry run)
    pub no_execute: bool,
}
228
229impl Default for NonInteractiveConfig {
230    fn default() -> Self {
231        Self {
232            output_format: String::from("text"),
233            max_tokens: DEFAULT_MAX_TOKENS,
234            no_execute: false,
235        }
236    }
237}
238
239/// Load configuration from single config file
240/// Priority: config file > defaults (that's it - no merging, no env vars)
241pub fn load_config() -> Result<Config> {
242    let config_path = get_config_path()?;
243
244    if config_path.exists() {
245        let toml_str = std::fs::read_to_string(&config_path)
246            .with_context(|| format!("Failed to read {}", config_path.display()))?;
247        let config: Config = toml::from_str(&toml_str)
248            .with_context(|| format!("Failed to parse {}. Run 'mermaid init' to regenerate.", config_path.display()))?;
249        Ok(config)
250    } else {
251        Ok(Config::default())
252    }
253}
254
255/// Get the path to the single config file
256pub fn get_config_path() -> Result<PathBuf> {
257    Ok(get_config_dir()?.join("config.toml"))
258}
259
260/// Get the configuration directory
261pub fn get_config_dir() -> Result<PathBuf> {
262    if let Some(proj_dirs) = ProjectDirs::from("", "", "mermaid") {
263        let config_dir = proj_dirs.config_dir();
264        std::fs::create_dir_all(config_dir)?;
265        Ok(config_dir.to_path_buf())
266    } else {
267        // Fallback to home directory
268        let home = std::env::var("HOME")
269            .or_else(|_| std::env::var("USERPROFILE"))
270            .context("Could not determine home directory")?;
271        let config_dir = PathBuf::from(home).join(".config").join("mermaid");
272        std::fs::create_dir_all(&config_dir)?;
273        Ok(config_dir)
274    }
275}
276
277/// Save configuration to file
278pub fn save_config(config: &Config, path: Option<PathBuf>) -> Result<()> {
279    let path = if let Some(p) = path {
280        p
281    } else {
282        get_config_dir()?.join("config.toml")
283    };
284
285    let toml_string = toml::to_string_pretty(config)?;
286    std::fs::write(&path, toml_string)
287        .with_context(|| format!("Failed to write config to {}", path.display()))?;
288
289    Ok(())
290}
291
292/// Create a default configuration file if it doesn't exist
293pub fn init_config() -> Result<()> {
294    let config_file = get_config_path()?;
295
296    if config_file.exists() {
297        println!("Configuration already exists at: {}", config_file.display());
298    } else {
299        let default_config = Config::default();
300        save_config(&default_config, Some(config_file.clone()))?;
301        println!("Created configuration at: {}", config_file.display());
302    }
303
304    Ok(())
305}
306
307/// Persist the last used model to config file
308pub fn persist_last_model(model: &str) -> Result<()> {
309    let mut config = load_config().unwrap_or_default();
310    config.last_used_model = Some(model.to_string());
311    save_config(&config, None)
312}