use anyhow::{Context, Result};
use serde::Deserialize;
use std::path::{Path, PathBuf};

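/// Resolved application configuration after merging the TOML file and any CLI overrides.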
#[derive(Debug, Clone)]
pub struct AppConfig {
    pub mode: AppMode,
}

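/// Operating mode selected by `[mode] active`: plain local mode, or LLM mode with backend settings.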
#[derive(Debug, Clone)]
pub enum AppMode {
    Local,
    Llm(LlmModeConfig),
}

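/// Connection settings for the LLM backend; defaults depend on the chosen
/// provider (`ollama` or an OpenAI-compatible server).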
#[derive(Debug, Clone)]
pub struct LlmModeConfig {
    pub provider: String,
    pub endpoint: String,
    pub model: String,
    pub api_key: Option<String>,
    pub timeout_secs: u64,
}

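/// Raw shape of `config.toml` as deserialized from disk; every field is
/// optional so a partial file still parses.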
#[derive(Debug, Deserialize, Default)]
struct TomlConfig {
    mode: Option<TomlMode>,
    llm: Option<TomlLlm>,
}

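/// The `[mode]` table; `active` selects `"local"` (the default) or `"llm"`.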
#[derive(Debug, Deserialize, Default)]
struct TomlMode {
    active: Option<String>,
}

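/// The `[llm]` table; unset or empty values fall back to provider-specific defaults.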
#[derive(Debug, Deserialize, Default)]
struct TomlLlm {
    provider: Option<String>,
    endpoint: Option<String>,
    model: Option<String>,
    api_key: Option<String>,
    timeout_secs: Option<u64>,
}

impl AppConfig {
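    /// Loads configuration from `path`, or, when `path` is `None`, searches
    /// `./config.toml` and then `$HOME/.config/garbage-code-hunter/config.toml`.
    /// If no file is found, the built-in defaults (local mode) are used.
    ///
    /// Example `config.toml` (keys as exercised by the tests below):
    ///
    /// ```toml
    /// [mode]
    /// active = "llm"
    ///
    /// [llm]
    /// provider = "ollama"
    /// model = "llama3.2"
    /// ```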
    pub fn from_file(path: Option<&Path>) -> Result<Self> {
        let toml_config = match path {
            Some(p) => load_toml_from_path(p)?,
            None => find_and_load_config()?,
        };
        Ok(Self::from_toml(toml_config))
    }

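    /// Converts raw TOML values into an `AppConfig`, filling in defaults for
    /// anything the file leaves unset.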
    fn from_toml(toml: TomlConfig) -> Self {
        let active_mode = toml
            .mode
            .as_ref()
            .and_then(|m| m.active.as_deref())
            .unwrap_or("local");

        let mode = match active_mode {
            "llm" => {
                let llm = toml.llm.unwrap_or_default();
                let provider = llm.provider.unwrap_or_else(|| "ollama".to_string());
                let (default_endpoint, default_model) = match provider.as_str() {
                    "ollama" => ("http://localhost:11434", "llama3.2"),
                    _ => ("http://localhost:1234", "gpt-3.5-turbo"),
                };
                AppMode::Llm(LlmModeConfig {
                    endpoint: llm
                        .endpoint
                        .filter(|e| !e.is_empty())
                        .unwrap_or_else(|| default_endpoint.to_string()),
                    model: llm
                        .model
                        .filter(|m| !m.is_empty())
                        .unwrap_or_else(|| default_model.to_string()),
                    api_key: llm.api_key.filter(|k| !k.is_empty()),
                    timeout_secs: llm.timeout_secs.unwrap_or(30),
                    provider,
                })
            }
            _ => AppMode::Local,
        };

        Self { mode }
    }

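    /// Applies CLI overrides on top of the file-based configuration. When
    /// `llm_flag` is set, LLM mode is forced with the given provider; the
    /// optional arguments then override individual fields whenever LLM mode
    /// is active.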
    pub fn merge_cli(
        &mut self,
        llm_flag: bool,
        llm_provider: &str,
        llm_endpoint: Option<&str>,
        llm_model: Option<&str>,
        llm_api_key: Option<&str>,
        llm_timeout: Option<u64>,
    ) {
        if llm_flag {
            let provider = llm_provider.to_string();
            let (default_endpoint, default_model) = match provider.as_str() {
                "ollama" => ("http://localhost:11434", "llama3.2"),
                _ => ("http://localhost:1234", "gpt-3.5-turbo"),
            };
            self.mode = AppMode::Llm(LlmModeConfig {
                endpoint: llm_endpoint.unwrap_or(default_endpoint).to_string(),
                model: llm_model.unwrap_or(default_model).to_string(),
                api_key: llm_api_key.map(String::from),
                timeout_secs: llm_timeout.unwrap_or(30),
                provider,
            });
        }

        if let AppMode::Llm(ref mut llm_cfg) = self.mode {
            if let Some(ep) = llm_endpoint {
                llm_cfg.endpoint = ep.to_string();
            }
            if let Some(m) = llm_model {
                llm_cfg.model = m.to_string();
            }
            if let Some(k) = llm_api_key {
                llm_cfg.api_key = Some(k.to_string());
            }
            if let Some(timeout) = llm_timeout {
                llm_cfg.timeout_secs = timeout;
            }
        }
    }
}

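/// Reads and parses a TOML config file, attaching the file path to any error.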
fn load_toml_from_path(path: &Path) -> Result<TomlConfig> {
    let content = std::fs::read_to_string(path)
        .with_context(|| format!("Failed to read config file: {}", path.display()))?;
    toml::from_str(&content)
        .with_context(|| format!("Failed to parse config file: {}", path.display()))
}

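/// Looks for `config.toml` in the current directory, then under
/// `$HOME/.config/garbage-code-hunter/`; returns an empty (default) config
/// when neither file exists.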
fn find_and_load_config() -> Result<TomlConfig> {
    let local_config = PathBuf::from("config.toml");
    if local_config.exists() {
        return load_toml_from_path(&local_config);
    }

    if let Ok(home) = std::env::var("HOME") {
        let user_config = PathBuf::from(home)
            .join(".config")
            .join("garbage-code-hunter")
            .join("config.toml");
        if user_config.exists() {
            return load_toml_from_path(&user_config);
        }
    }

    Ok(TomlConfig::default())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_config_defaults_when_no_file() {
        let config = AppConfig::from_toml(TomlConfig::default());
        assert!(matches!(config.mode, AppMode::Local));
    }

    #[test]
    fn test_config_parse_local_mode() {
        let toml_str = r#"
[mode]
active = "local"
"#;
        let toml_config: TomlConfig = toml::from_str(toml_str).unwrap();
        let config = AppConfig::from_toml(toml_config);
        assert!(matches!(config.mode, AppMode::Local));
    }

    #[test]
    fn test_config_parse_llm_mode() {
        let toml_str = r#"
[mode]
active = "llm"

[llm]
provider = "ollama"
endpoint = "http://custom:11434"
model = "llama3.1"
timeout_secs = 60
"#;
        let toml_config: TomlConfig = toml::from_str(toml_str).unwrap();
        let config = AppConfig::from_toml(toml_config);
        match config.mode {
            AppMode::Llm(llm) => {
                assert_eq!(llm.provider, "ollama");
                assert_eq!(llm.endpoint, "http://custom:11434");
                assert_eq!(llm.model, "llama3.1");
                assert_eq!(llm.timeout_secs, 60);
            }
            _ => panic!("Expected LLM mode"),
        }
    }

    #[test]
    fn test_config_llm_defaults() {
        let toml_str = r#"
[mode]
active = "llm"
"#;
        let toml_config: TomlConfig = toml::from_str(toml_str).unwrap();
        let config = AppConfig::from_toml(toml_config);
        match config.mode {
            AppMode::Llm(llm) => {
                assert_eq!(llm.provider, "ollama");
                assert_eq!(llm.endpoint, "http://localhost:11434");
                assert_eq!(llm.model, "llama3.2");
                assert_eq!(llm.timeout_secs, 30);
                assert!(llm.api_key.is_none());
            }
            _ => panic!("Expected LLM mode"),
        }
    }

    #[test]
    fn test_config_llm_openai_compatible() {
        let toml_str = r#"
[mode]
active = "llm"

[llm]
provider = "openai-compatible"
api_key = "sk-test123"
"#;
        let toml_config: TomlConfig = toml::from_str(toml_str).unwrap();
        let config = AppConfig::from_toml(toml_config);
        match config.mode {
            AppMode::Llm(llm) => {
                assert_eq!(llm.provider, "openai-compatible");
                assert_eq!(llm.endpoint, "http://localhost:1234");
                assert_eq!(llm.model, "gpt-3.5-turbo");
                assert_eq!(llm.api_key, Some("sk-test123".to_string()));
            }
            _ => panic!("Expected LLM mode"),
        }
    }

    #[test]
    fn test_config_invalid_toml() {
        let result = toml::from_str::<TomlConfig>("this is not valid toml [[[");
        assert!(result.is_err());
    }

    #[test]
    fn test_config_cli_overrides_file() {
        let toml_str = r#"
[mode]
active = "local"

[llm]
provider = "ollama"
"#;
        let toml_config: TomlConfig = toml::from_str(toml_str).unwrap();
        let mut config = AppConfig::from_toml(toml_config);
        assert!(matches!(config.mode, AppMode::Local));

        config.merge_cli(
            true,
            "openai-compatible",
            None,
            None,
            Some("sk-key"),
            Some(60),
        );
        match config.mode {
            AppMode::Llm(llm) => {
                assert_eq!(llm.provider, "openai-compatible");
                assert_eq!(llm.api_key, Some("sk-key".to_string()));
                assert_eq!(llm.timeout_secs, 60);
            }
            _ => panic!("Expected LLM mode after CLI override"),
        }
    }
}