// mermaid_cli/utils/checks.rs
//! Preemptive checks for service availability
//!
//! These checks run BEFORE operations to provide clear, early error messages
//! rather than cryptic failures during execution.

use std::time::Duration;

use super::retry::{RetryConfig, retry_async};

/// Outcome of a preemptive service check, carrying an actionable message on failure.
#[derive(Debug)]
#[must_use]
pub struct CheckResult {
    // True when the checked service responded as expected.
    pub available: bool,
    // Empty on success; otherwise a user-facing explanation with remediation steps.
    pub message: String,
}

impl CheckResult {
    /// A passing check: service is available, nothing to report.
    pub fn ok() -> Self {
        CheckResult {
            available: true,
            message: String::new(),
        }
    }

    /// A failing check carrying an actionable, user-facing message.
    pub fn fail(message: impl Into<String>) -> Self {
        let message = message.into();
        CheckResult {
            available: false,
            message,
        }
    }
}
34/// Check if Ollama is running and responding
35///
36/// Returns early with a clear message if Ollama isn't available,
37/// rather than letting the connection fail later with a timeout.
38/// Uses retry logic since Ollama may be starting up.
39pub async fn check_ollama_available(host: &str, port: u16) -> CheckResult {
40    let url = format!("http://{}:{}/api/tags", host, port);
41    let host_owned = host.to_string();
42    let port_owned = port;
43
44    // Use a short timeout for the health check - we just want to know if it's running
45    let client = match reqwest::Client::builder()
46        .timeout(Duration::from_secs(3))
47        .build()
48    {
49        Ok(c) => c,
50        Err(e) => {
51            return CheckResult::fail(format!("Failed to create HTTP client: {}", e));
52        },
53    };
54
55    // Retry config: 3 attempts with quick backoff (Ollama might be starting)
56    let retry_config = RetryConfig {
57        max_attempts: 3,
58        initial_delay_ms: 500,
59        max_delay_ms: 2000,
60        backoff_multiplier: 2.0,
61    };
62
63    let result = retry_async(
64        || {
65            let client = client.clone();
66            let url = url.clone();
67            async move {
68                let response = client
69                    .get(&url)
70                    .send()
71                    .await
72                    .map_err(|e| anyhow::anyhow!("{}", e))?;
73
74                if response.status().is_success() {
75                    Ok(())
76                } else {
77                    Err(anyhow::anyhow!("HTTP {}", response.status()))
78                }
79            }
80        },
81        &retry_config,
82    )
83    .await;
84
85    match result {
86        Ok(()) => CheckResult::ok(),
87        Err(e) => {
88            let error_str = e.to_string();
89            let message = if error_str.contains("Connection refused") {
90                format!(
91                    "Ollama is not running at {}:{}\n\n\
92                    Start Ollama with:\n\
93                      ollama serve\n\n\
94                    Or if using systemd:\n\
95                      systemctl start ollama",
96                    host_owned, port_owned
97                )
98            } else if error_str.contains("timed out") {
99                format!(
100                    "Ollama at {}:{} is not responding (timed out)\n\n\
101                    Ollama may be overloaded or starting up. Check:\n\
102                      ollama ps        # See running models\n\
103                      ollama list      # See available models",
104                    host_owned, port_owned
105                )
106            } else {
107                format!(
108                    "Cannot connect to Ollama at {}:{}\n\n\
109                    Error: {}\n\n\
110                    Make sure Ollama is installed and running:\n\
111                      curl -fsSL https://ollama.com/install.sh | sh\n\
112                      ollama serve",
113                    host_owned, port_owned, error_str
114                )
115            };
116            CheckResult::fail(message)
117        },
118    }
119}
121/// Check if a specific model is available in Ollama
122pub async fn check_ollama_model(host: &str, port: u16, model_name: &str) -> CheckResult {
123    let url = format!("http://{}:{}/api/tags", host, port);
124    let model_name_owned = model_name.to_string();
125
126    let client = match reqwest::Client::builder()
127        .timeout(Duration::from_secs(5))
128        .build()
129    {
130        Ok(c) => c,
131        Err(e) => {
132            return CheckResult::fail(format!("Failed to create HTTP client: {}", e));
133        },
134    };
135
136    // Retry config: 2 attempts (model listing should be quick)
137    let retry_config = RetryConfig {
138        max_attempts: 2,
139        initial_delay_ms: 300,
140        max_delay_ms: 1000,
141        backoff_multiplier: 2.0,
142    };
143
144    let result = retry_async(
145        || {
146            let client = client.clone();
147            let url = url.clone();
148            async move {
149                let response = client
150                    .get(&url)
151                    .send()
152                    .await
153                    .map_err(|e| anyhow::anyhow!("Cannot connect to Ollama: {}", e))?;
154
155                if !response.status().is_success() {
156                    return Err(anyhow::anyhow!(
157                        "Ollama responded with error: {}",
158                        response.status()
159                    ));
160                }
161
162                response
163                    .json::<serde_json::Value>()
164                    .await
165                    .map_err(|e| anyhow::anyhow!("Failed to parse Ollama response: {}", e))
166            }
167        },
168        &retry_config,
169    )
170    .await;
171
172    match result {
173        Ok(json) => {
174            if let Some(models) = json.get("models").and_then(|m| m.as_array()) {
175                let model_names: Vec<&str> = models
176                    .iter()
177                    .filter_map(|m| m.get("name").and_then(|n| n.as_str()))
178                    .collect();
179
180                // Check for exact match or prefix match (e.g., "llama3" matches "llama3:latest")
181                let found = model_names.iter().any(|name| {
182                    *name == model_name_owned
183                        || name.starts_with(&format!("{}:", model_name_owned))
184                        || model_name_owned.starts_with(&format!("{}:", name))
185                });
186
187                if found {
188                    CheckResult::ok()
189                } else {
190                    let available = if model_names.is_empty() {
191                        "No models installed".to_string()
192                    } else {
193                        model_names.join(", ")
194                    };
195                    CheckResult::fail(format!(
196                        "Model '{}' not found in Ollama\n\n\
197                        Available models: {}\n\n\
198                        Pull the model with:\n\
199                          ollama pull {}",
200                        model_name_owned, available, model_name_owned
201                    ))
202                }
203            } else {
204                CheckResult::fail("Invalid response from Ollama: missing models list")
205            }
206        },
207        Err(e) => CheckResult::fail(e.to_string()),
208    }
209}
#[cfg(test)]
mod tests {
    use super::*;

    /// `ok()` reports availability and carries no message.
    #[test]
    fn test_check_result_ok() {
        let check = CheckResult::ok();
        assert!(check.available);
        assert!(check.message.is_empty());
    }

    /// `fail()` preserves the provided message verbatim.
    #[test]
    fn test_check_result_fail() {
        let check = CheckResult::fail("test error");
        assert!(!check.available);
        assert_eq!(check.message, "test error");
    }

    /// The availability probe must terminate either way; Ollama may or may
    /// not actually be running in the test environment.
    #[tokio::test]
    async fn test_check_ollama_available_completes() {
        let check = check_ollama_available("localhost", 11434).await;
        assert!(check.available || !check.message.is_empty());
    }

    /// Probing a port nothing should be listening on must yield actionable
    /// advice in the failure message.
    #[tokio::test]
    async fn test_check_ollama_error_message_is_helpful() {
        let check = check_ollama_available("localhost", 59999).await;
        if !check.available {
            assert!(
                check.message.contains("ollama serve") || check.message.contains("not running"),
                "Error should include actionable advice"
            );
        }
    }
}