// mermaid_cli/utils/checks.rs

//! Preemptive checks for service availability
//!
//! These checks run BEFORE operations to provide clear, early error messages
//! rather than cryptic failures during execution.
5
6use std::path::Path;
7use std::time::Duration;
8
9use super::retry::{retry_async, RetryConfig};
10
/// Check result with actionable error message
///
/// `available == true` means the check passed and `message` is empty;
/// `available == false` means it failed and `message` carries a
/// human-readable, actionable explanation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CheckResult {
    // Whether the checked service/resource is usable.
    pub available: bool,
    // Actionable error text; empty when `available` is true.
    pub message: String,
}

impl CheckResult {
    /// Successful check: available, with no message.
    pub fn ok() -> Self {
        Self {
            available: true,
            message: String::new(),
        }
    }

    /// Failed check carrying an actionable error message.
    pub fn fail(message: impl Into<String>) -> Self {
        Self {
            available: false,
            message: message.into(),
        }
    }
}
33
34/// Check if Ollama is running and responding
35///
36/// Returns early with a clear message if Ollama isn't available,
37/// rather than letting the connection fail later with a timeout.
38/// Uses retry logic since Ollama may be starting up.
39pub async fn check_ollama_available(host: &str, port: u16) -> CheckResult {
40    let url = format!("http://{}:{}/api/tags", host, port);
41    let host_owned = host.to_string();
42    let port_owned = port;
43
44    // Use a short timeout for the health check - we just want to know if it's running
45    let client = match reqwest::Client::builder()
46        .timeout(Duration::from_secs(3))
47        .build()
48    {
49        Ok(c) => c,
50        Err(e) => {
51            return CheckResult::fail(format!("Failed to create HTTP client: {}", e));
52        }
53    };
54
55    // Retry config: 3 attempts with quick backoff (Ollama might be starting)
56    let retry_config = RetryConfig {
57        max_attempts: 3,
58        initial_delay_ms: 500,
59        max_delay_ms: 2000,
60        backoff_multiplier: 2.0,
61    };
62
63    let result = retry_async(
64        || {
65            let client = client.clone();
66            let url = url.clone();
67            async move {
68                let response = client
69                    .get(&url)
70                    .send()
71                    .await
72                    .map_err(|e| anyhow::anyhow!("{}", e))?;
73
74                if response.status().is_success() {
75                    Ok(())
76                } else {
77                    Err(anyhow::anyhow!("HTTP {}", response.status()))
78                }
79            }
80        },
81        &retry_config,
82    )
83    .await;
84
85    match result {
86        Ok(()) => CheckResult::ok(),
87        Err(e) => {
88            let error_str = e.to_string();
89            let message = if error_str.contains("Connection refused") {
90                format!(
91                    "Ollama is not running at {}:{}\n\n\
92                    Start Ollama with:\n\
93                      ollama serve\n\n\
94                    Or if using systemd:\n\
95                      systemctl start ollama",
96                    host_owned, port_owned
97                )
98            } else if error_str.contains("timed out") {
99                format!(
100                    "Ollama at {}:{} is not responding (timed out)\n\n\
101                    Ollama may be overloaded or starting up. Check:\n\
102                      ollama ps        # See running models\n\
103                      ollama list      # See available models",
104                    host_owned, port_owned
105                )
106            } else {
107                format!(
108                    "Cannot connect to Ollama at {}:{}\n\n\
109                    Error: {}\n\n\
110                    Make sure Ollama is installed and running:\n\
111                      curl -fsSL https://ollama.com/install.sh | sh\n\
112                      ollama serve",
113                    host_owned, port_owned, error_str
114                )
115            };
116            CheckResult::fail(message)
117        }
118    }
119}
120
121/// Check if the given path is inside a git repository
122///
123/// Returns early with a clear message if not in a git repo,
124/// rather than letting git operations fail with confusing errors.
125pub fn check_git_repo(path: Option<&Path>) -> CheckResult {
126    let check_path = path.unwrap_or_else(|| Path::new("."));
127
128    // Walk up the directory tree looking for .git
129    let mut current = if check_path.is_absolute() {
130        check_path.to_path_buf()
131    } else {
132        match std::env::current_dir() {
133            Ok(cwd) => cwd.join(check_path),
134            Err(e) => {
135                return CheckResult::fail(format!("Cannot determine current directory: {}", e));
136            }
137        }
138    };
139
140    loop {
141        let git_dir = current.join(".git");
142        if git_dir.exists() {
143            return CheckResult::ok();
144        }
145
146        match current.parent() {
147            Some(parent) => current = parent.to_path_buf(),
148            None => break,
149        }
150    }
151
152    let path_display = path
153        .map(|p| p.display().to_string())
154        .unwrap_or_else(|| "current directory".to_string());
155
156    CheckResult::fail(format!(
157        "Not a git repository: {}\n\n\
158        Git operations require a git repository. Initialize one with:\n\
159          git init\n\n\
160        Or navigate to an existing repository.",
161        path_display
162    ))
163}
164
165/// Check if a specific model is available in Ollama
166pub async fn check_ollama_model(host: &str, port: u16, model_name: &str) -> CheckResult {
167    let url = format!("http://{}:{}/api/tags", host, port);
168    let model_name_owned = model_name.to_string();
169
170    let client = match reqwest::Client::builder()
171        .timeout(Duration::from_secs(5))
172        .build()
173    {
174        Ok(c) => c,
175        Err(e) => {
176            return CheckResult::fail(format!("Failed to create HTTP client: {}", e));
177        }
178    };
179
180    // Retry config: 2 attempts (model listing should be quick)
181    let retry_config = RetryConfig {
182        max_attempts: 2,
183        initial_delay_ms: 300,
184        max_delay_ms: 1000,
185        backoff_multiplier: 2.0,
186    };
187
188    let result = retry_async(
189        || {
190            let client = client.clone();
191            let url = url.clone();
192            async move {
193                let response = client
194                    .get(&url)
195                    .send()
196                    .await
197                    .map_err(|e| anyhow::anyhow!("Cannot connect to Ollama: {}", e))?;
198
199                if !response.status().is_success() {
200                    return Err(anyhow::anyhow!(
201                        "Ollama responded with error: {}",
202                        response.status()
203                    ));
204                }
205
206                response
207                    .json::<serde_json::Value>()
208                    .await
209                    .map_err(|e| anyhow::anyhow!("Failed to parse Ollama response: {}", e))
210            }
211        },
212        &retry_config,
213    )
214    .await;
215
216    match result {
217        Ok(json) => {
218            if let Some(models) = json.get("models").and_then(|m| m.as_array()) {
219                let model_names: Vec<&str> = models
220                    .iter()
221                    .filter_map(|m| m.get("name").and_then(|n| n.as_str()))
222                    .collect();
223
224                // Check for exact match or prefix match (e.g., "llama3" matches "llama3:latest")
225                let found = model_names.iter().any(|name| {
226                    *name == model_name_owned
227                        || name.starts_with(&format!("{}:", model_name_owned))
228                        || model_name_owned.starts_with(&format!("{}:", name))
229                });
230
231                if found {
232                    CheckResult::ok()
233                } else {
234                    let available = if model_names.is_empty() {
235                        "No models installed".to_string()
236                    } else {
237                        model_names.join(", ")
238                    };
239                    CheckResult::fail(format!(
240                        "Model '{}' not found in Ollama\n\n\
241                        Available models: {}\n\n\
242                        Pull the model with:\n\
243                          ollama pull {}",
244                        model_name_owned, available, model_name_owned
245                    ))
246                }
247            } else {
248                CheckResult::fail("Invalid response from Ollama: missing models list")
249            }
250        }
251        Err(e) => CheckResult::fail(e.to_string()),
252    }
253}
254
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_check_git_repo_completes_without_panic() {
        // Whether the test process sits inside a git repo depends on the
        // environment, so only verify the result is internally consistent:
        // either available, or failed with a non-empty message.
        let result = check_git_repo(None);
        assert!(result.available || !result.message.is_empty());
    }

    #[test]
    fn test_check_git_repo_with_explicit_path() {
        // An explicit path (one unlikely to be a repo) must still produce a
        // consistent result either way.
        let result = check_git_repo(Some(Path::new("/tmp")));
        assert!(result.available || !result.message.is_empty());
    }

    #[test]
    fn test_check_git_repo_error_message_is_helpful() {
        // On failure, the message must carry actionable advice.
        let result = check_git_repo(Some(Path::new("/tmp")));
        if !result.available {
            let msg = &result.message;
            assert!(msg.contains("git init") || msg.contains("Not a git repository"));
        }
    }

    #[test]
    fn test_check_result_ok() {
        let ok = CheckResult::ok();
        assert!(ok.available);
        assert!(ok.message.is_empty());
    }

    #[test]
    fn test_check_result_fail() {
        let failed = CheckResult::fail("test error");
        assert!(!failed.available);
        assert_eq!(failed.message, "test error");
    }

    #[tokio::test]
    async fn test_check_ollama_available_completes() {
        // Ollama may or may not be running during tests; the check must
        // complete without panicking either way.
        let result = check_ollama_available("localhost", 11434).await;
        assert!(result.available || !result.message.is_empty());
    }

    #[tokio::test]
    async fn test_check_ollama_error_message_is_helpful() {
        // A high port that is almost certainly not Ollama; on failure the
        // message must include actionable advice.
        let result = check_ollama_available("localhost", 59999).await;
        if !result.available {
            let msg = &result.message;
            assert!(
                msg.contains("ollama serve") || msg.contains("not running"),
                "Error should include actionable advice"
            );
        }
    }
}