// chasm/providers/ollama.rs
1// Copyright (c) 2024-2026 Nervosys LLC
2// SPDX-License-Identifier: AGPL-3.0-only
3//! Ollama provider for local LLM inference
4
5#![allow(dead_code)]
6
use super::{ChatProvider, ProviderType};
use crate::models::{ChatMessage, ChatRequest, ChatSession};
use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::net::{TcpStream, ToSocketAddrs};
use std::path::PathBuf;
use std::time::Duration;
12
/// Ollama API provider
///
/// Ollama runs local LLMs and provides an API at http://localhost:11434
/// It can also be configured to save conversation history.
///
/// Constructed via [`OllamaProvider::discover`], which reads the endpoint
/// from the `OLLAMA_HOST` environment variable and probes availability.
pub struct OllamaProvider {
    /// API endpoint URL (from `OLLAMA_HOST`, default `http://localhost:11434`)
    endpoint: String,
    /// Whether Ollama is available (set once at discovery time)
    available: bool,
    /// Path to Ollama's data directory (for history); `None` if not found
    data_path: Option<PathBuf>,
}
25
/// Ollama API response for listing models
///
/// Shape of the JSON returned by Ollama's `/api/tags` endpoint.
#[derive(Debug, Deserialize)]
struct OllamaModelsResponse {
    /// Installed models reported by the server
    models: Vec<OllamaModel>,
}
31
/// Ollama model info
///
/// One entry of [`OllamaModelsResponse`]. The optional fields mirror the
/// API payload; they are currently unread (file-level `allow(dead_code)`).
#[derive(Debug, Deserialize)]
struct OllamaModel {
    /// Model name/tag, e.g. "llama3:latest"
    name: String,
    /// Last-modified timestamp as reported by the API
    modified_at: Option<String>,
    /// Model size in bytes
    size: Option<u64>,
}
39
/// Ollama chat message format
///
/// Role/content pair as used by Ollama's chat API; `role` is one of
/// "user", "assistant", or "system" (only the first two are converted
/// by `convert_to_session`).
#[derive(Debug, Serialize, Deserialize)]
struct OllamaChatMessage {
    /// Message author role
    role: String,
    /// Message body text
    content: String,
}
46
/// Ollama chat request
///
/// Request body for Ollama's `/api/chat` endpoint.
#[derive(Debug, Serialize)]
struct OllamaChatRequest {
    /// Target model name
    model: String,
    /// Conversation history, oldest first
    messages: Vec<OllamaChatMessage>,
    /// Whether to stream the response incrementally
    stream: bool,
}
54
/// Ollama chat response
///
/// Non-streaming response body (or one streamed chunk) from `/api/chat`.
#[derive(Debug, Deserialize)]
struct OllamaChatResponse {
    /// The assistant's reply message
    message: OllamaChatMessage,
    /// True when this is the final chunk of a response
    done: bool,
}
61
62impl OllamaProvider {
63    /// Discover Ollama installation and create provider
64    pub fn discover() -> Option<Self> {
65        let endpoint =
66            std::env::var("OLLAMA_HOST").unwrap_or_else(|_| "http://localhost:11434".to_string());
67
68        let data_path = Self::find_ollama_data();
69
70        // Check if Ollama is running
71        let available = Self::check_availability(&endpoint);
72
73        Some(Self {
74            endpoint,
75            available,
76            data_path,
77        })
78    }
79
80    /// Find Ollama's data directory
81    fn find_ollama_data() -> Option<PathBuf> {
82        // Check OLLAMA_MODELS environment variable first
83        if let Ok(models_path) = std::env::var("OLLAMA_MODELS") {
84            return Some(PathBuf::from(models_path));
85        }
86
87        #[cfg(target_os = "windows")]
88        {
89            let home = dirs::home_dir()?;
90            let path = home.join(".ollama");
91            if path.exists() {
92                return Some(path);
93            }
94        }
95
96        #[cfg(target_os = "macos")]
97        {
98            let home = dirs::home_dir()?;
99            let path = home.join(".ollama");
100            if path.exists() {
101                return Some(path);
102            }
103        }
104
105        #[cfg(target_os = "linux")]
106        {
107            // Check XDG data dir first
108            if let Some(data_dir) = dirs::data_dir() {
109                let path = data_dir.join("ollama");
110                if path.exists() {
111                    return Some(path);
112                }
113            }
114            // Fall back to home directory
115            let home = dirs::home_dir()?;
116            let path = home.join(".ollama");
117            if path.exists() {
118                return Some(path);
119            }
120        }
121
122        None
123    }
124
125    /// Check if Ollama API is available
126    fn check_availability(endpoint: &str) -> bool {
127        // Try to connect to Ollama API
128        // We use a simple blocking check here
129        let _url = format!("{}/api/tags", endpoint);
130
131        // Use ureq for simple HTTP requests (add to Cargo.toml if needed)
132        // For now, we'll just check if the endpoint looks valid
133        // and assume it's available if configured
134        !endpoint.is_empty()
135    }
136
137    /// List available models from Ollama
138    pub fn list_models(&self) -> Result<Vec<String>> {
139        if !self.available {
140            return Ok(Vec::new());
141        }
142
143        // This would make an HTTP request to /api/tags
144        // For now, return empty list - implement with reqwest/ureq later
145        Ok(Vec::new())
146    }
147
148    /// Convert Ollama chat history to CSM session format
149    fn convert_to_session(&self, messages: Vec<OllamaChatMessage>, model: &str) -> ChatSession {
150        let now = chrono::Utc::now().timestamp_millis();
151        let session_id = uuid::Uuid::new_v4().to_string();
152
153        let mut requests = Vec::new();
154        let mut user_msg: Option<String> = None;
155
156        for msg in messages {
157            match msg.role.as_str() {
158                "user" => {
159                    user_msg = Some(msg.content);
160                }
161                "assistant" => {
162                    if let Some(user_text) = user_msg.take() {
163                        requests.push(ChatRequest {
164                            timestamp: Some(now),
165                            message: Some(ChatMessage {
166                                text: Some(user_text),
167                                parts: None,
168                            }),
169                            response: Some(serde_json::json!({
170                                "value": [{"value": msg.content}]
171                            })),
172                            variable_data: None,
173                            request_id: Some(uuid::Uuid::new_v4().to_string()),
174                            response_id: Some(uuid::Uuid::new_v4().to_string()),
175                            model_id: Some(format!("ollama/{}", model)),
176                            agent: None,
177                            result: None,
178                            followups: None,
179                            is_canceled: Some(false),
180                            content_references: None,
181                            code_citations: None,
182                            response_markdown_info: None,
183                            source_session: None,
184                            model_state: None,
185                            time_spent_waiting: None,
186                        });
187                    }
188                }
189                _ => {}
190            }
191        }
192
193        ChatSession {
194            version: 3,
195            session_id: Some(session_id),
196            creation_date: now,
197            last_message_date: now,
198            is_imported: true,
199            initial_location: "ollama".to_string(),
200            custom_title: Some(format!("Ollama Chat ({})", model)),
201            requester_username: Some("user".to_string()),
202            requester_avatar_icon_uri: None,
203            responder_username: Some(format!("Ollama/{}", model)),
204            responder_avatar_icon_uri: None,
205            requests,
206        }
207    }
208}
209
210impl ChatProvider for OllamaProvider {
211    fn provider_type(&self) -> ProviderType {
212        ProviderType::Ollama
213    }
214
215    fn name(&self) -> &str {
216        "Ollama"
217    }
218
219    fn is_available(&self) -> bool {
220        self.available
221    }
222
223    fn sessions_path(&self) -> Option<PathBuf> {
224        self.data_path.clone()
225    }
226
227    fn list_sessions(&self) -> Result<Vec<ChatSession>> {
228        // Ollama doesn't persist chat history by default
229        // This would need integration with Ollama's history feature
230        // or a custom persistence layer
231        Ok(Vec::new())
232    }
233
234    fn import_session(&self, _session_id: &str) -> Result<ChatSession> {
235        anyhow::bail!("Ollama does not persist chat sessions by default")
236    }
237
238    fn export_session(&self, _session: &ChatSession) -> Result<()> {
239        // Could implement by sending messages to Ollama to recreate context
240        anyhow::bail!("Export to Ollama not yet implemented")
241    }
242}
243
244/// Create an Ollama chat session from a conversation
245pub fn create_ollama_session(
246    messages: Vec<(String, String)>, // (user_msg, assistant_msg) pairs
247    model: &str,
248) -> ChatSession {
249    let provider = OllamaProvider {
250        endpoint: String::new(),
251        available: false,
252        data_path: None,
253    };
254
255    let ollama_messages: Vec<OllamaChatMessage> = messages
256        .into_iter()
257        .flat_map(|(user, assistant)| {
258            vec![
259                OllamaChatMessage {
260                    role: "user".to_string(),
261                    content: user,
262                },
263                OllamaChatMessage {
264                    role: "assistant".to_string(),
265                    content: assistant,
266                },
267            ]
268        })
269        .collect();
270
271    provider.convert_to_session(ollama_messages, model)
272}