Skip to main content

mermaid_cli/app/
config.rs

1use crate::constants::{DEFAULT_MAX_TOKENS, DEFAULT_OLLAMA_PORT, DEFAULT_TEMPERATURE};
2use anyhow::{Context, Result};
3use directories::ProjectDirs;
4use serde::{Deserialize, Serialize};
5use std::path::PathBuf;
6
/// Main configuration structure
///
/// Deserialized from a single `config.toml` (see `load_config`). Every field
/// carries `#[serde(default)]`, so any section may be omitted from the file
/// and falls back to its `Default` impl.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct Config {
    /// Last used model (persisted between sessions via `persist_last_model`)
    #[serde(default)]
    pub last_used_model: Option<String>,

    /// Default model configuration
    #[serde(default)]
    pub default_model: ModelSettings,

    /// Ollama configuration
    #[serde(default)]
    pub ollama: OllamaConfig,

    /// Non-interactive mode configuration
    #[serde(default)]
    pub non_interactive: NonInteractiveConfig,
}
26
/// Default model settings
///
/// An empty `provider`/`name` pair (the `Default`) means "no default model
/// configured" — see `resolve_model_id`, which skips this section when either
/// string is empty.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct ModelSettings {
    /// Model provider (ollama, openai, anthropic)
    pub provider: String,
    /// Model name
    pub name: String,
    /// Temperature for generation
    pub temperature: f32,
    /// Maximum tokens to generate
    pub max_tokens: usize,
}
40
41impl Default for ModelSettings {
42    fn default() -> Self {
43        Self {
44            provider: String::new(),
45            name: String::new(),
46            temperature: DEFAULT_TEMPERATURE,
47            max_tokens: DEFAULT_MAX_TOKENS,
48        }
49    }
50}
51
/// Ollama configuration
///
/// Connection settings plus optional runtime tuning knobs. Each `Option`
/// field is `None` by default, which leaves the decision to Ollama itself
/// (auto-detect / model default).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct OllamaConfig {
    /// Ollama server host
    pub host: String,
    /// Ollama server port
    pub port: u16,
    /// Ollama cloud API key (for :cloud models)
    /// Set this to use Ollama's cloud inference service
    /// Get your key at: https://ollama.com/cloud
    pub cloud_api_key: Option<String>,
    /// Number of GPU layers to offload (None = auto, 0 = CPU only, positive = specific count)
    /// Lower values free up VRAM for larger models at the cost of speed
    pub num_gpu: Option<i32>,
    /// Number of CPU threads for processing offloaded layers
    /// Higher values improve CPU inference speed for large models
    pub num_thread: Option<i32>,
    /// Context window size (number of tokens)
    /// Larger values allow longer conversations but use more memory
    pub num_ctx: Option<i32>,
    /// Enable NUMA optimization for multi-CPU systems
    pub numa: Option<bool>,
}
76
77impl Default for OllamaConfig {
78    fn default() -> Self {
79        Self {
80            host: String::from("localhost"),
81            port: DEFAULT_OLLAMA_PORT,
82            cloud_api_key: None,
83            num_gpu: None,    // Let Ollama auto-detect
84            num_thread: None, // Let Ollama auto-detect
85            num_ctx: None,    // Use model default
86            numa: None,       // Auto-detect
87        }
88    }
89}
90
/// Non-interactive mode configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct NonInteractiveConfig {
    /// Output format (text, json, markdown)
    pub output_format: String,
    /// Maximum tokens to generate
    pub max_tokens: usize,
    /// Don't execute agent actions (dry run)
    pub no_execute: bool,
}
102
103impl Default for NonInteractiveConfig {
104    fn default() -> Self {
105        Self {
106            output_format: String::from("text"),
107            max_tokens: DEFAULT_MAX_TOKENS,
108            no_execute: false,
109        }
110    }
111}
112
113/// Load configuration from single config file
114/// Priority: config file > defaults (that's it - no merging, no env vars)
115pub fn load_config() -> Result<Config> {
116    let config_path = get_config_path()?;
117
118    if config_path.exists() {
119        let toml_str = std::fs::read_to_string(&config_path)
120            .with_context(|| format!("Failed to read {}", config_path.display()))?;
121        let config: Config = toml::from_str(&toml_str).with_context(|| {
122            format!(
123                "Failed to parse {}. Run 'mermaid init' to regenerate.",
124                config_path.display()
125            )
126        })?;
127        Ok(config)
128    } else {
129        Ok(Config::default())
130    }
131}
132
133/// Get the path to the single config file
134pub fn get_config_path() -> Result<PathBuf> {
135    Ok(get_config_dir()?.join("config.toml"))
136}
137
138/// Get the configuration directory
139pub fn get_config_dir() -> Result<PathBuf> {
140    if let Some(proj_dirs) = ProjectDirs::from("", "", "mermaid") {
141        let config_dir = proj_dirs.config_dir();
142        std::fs::create_dir_all(config_dir)?;
143        Ok(config_dir.to_path_buf())
144    } else {
145        // Fallback to home directory
146        let home = std::env::var("HOME")
147            .or_else(|_| std::env::var("USERPROFILE"))
148            .context("Could not determine home directory")?;
149        let config_dir = PathBuf::from(home).join(".config").join("mermaid");
150        std::fs::create_dir_all(&config_dir)?;
151        Ok(config_dir)
152    }
153}
154
155/// Save configuration to file
156pub fn save_config(config: &Config, path: Option<PathBuf>) -> Result<()> {
157    let path = if let Some(p) = path {
158        p
159    } else {
160        get_config_dir()?.join("config.toml")
161    };
162
163    let toml_string = toml::to_string_pretty(config)?;
164    std::fs::write(&path, toml_string)
165        .with_context(|| format!("Failed to write config to {}", path.display()))?;
166
167    Ok(())
168}
169
170/// Create a default configuration file if it doesn't exist
171pub fn init_config() -> Result<()> {
172    let config_file = get_config_path()?;
173
174    if config_file.exists() {
175        println!("Configuration already exists at: {}", config_file.display());
176    } else {
177        let default_config = Config::default();
178        save_config(&default_config, Some(config_file.clone()))?;
179        println!("Created configuration at: {}", config_file.display());
180    }
181
182    Ok(())
183}
184
185/// Persist the last used model to config file
186pub fn persist_last_model(model: &str) -> Result<()> {
187    let mut config = load_config().unwrap_or_default();
188    config.last_used_model = Some(model.to_string());
189    save_config(&config, None)
190}
191
192/// Resolve which model to use: CLI arg > last_used > default_model > any available
193pub async fn resolve_model_id(cli_model: Option<&str>, config: &Config) -> anyhow::Result<String> {
194    if let Some(model) = cli_model {
195        return Ok(model.to_string());
196    }
197    if let Some(last_model) = &config.last_used_model {
198        return Ok(last_model.clone());
199    }
200    if !config.default_model.provider.is_empty() && !config.default_model.name.is_empty() {
201        return Ok(format!(
202            "{}/{}",
203            config.default_model.provider, config.default_model.name
204        ));
205    }
206    let available = crate::ollama::require_any_model().await?;
207    Ok(format!("ollama/{}", available[0]))
208}