// mermaid_cli/models/types.rs

1use crate::agents::ActionDisplay;
2use serde::{Deserialize, Serialize};
3use std::sync::Arc;
4
5/// Represents a chat message
/// Represents a chat message.
///
/// Serialized with serde; all auxiliary fields are `#[serde(default)]` so
/// messages persisted before a field existed still deserialize cleanly.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatMessage {
    /// Who authored the message: user, assistant, system, or tool.
    pub role: MessageRole,
    /// The textual body of the message.
    pub content: String,
    /// Local wall-clock time at which the message was constructed.
    pub timestamp: chrono::DateTime<chrono::Local>,
    /// Actions performed during this message (for display purposes)
    #[serde(default)]
    pub actions: Vec<ActionDisplay>,
    /// Thinking/reasoning content (for models that expose their thought process)
    #[serde(default)]
    pub thinking: Option<String>,
    /// Base64-encoded images/PDFs for multimodal models
    #[serde(default)]
    pub images: Option<Vec<String>>,
    /// Tool calls from the model (Ollama native function calling)
    #[serde(default)]
    pub tool_calls: Option<Vec<crate::models::tool_call::ToolCall>>,
    /// Tool call ID for tool result messages (OpenAI-compatible format)
    /// This links the tool result back to the original tool_call from the assistant
    #[serde(default)]
    pub tool_call_id: Option<String>,
    /// Tool name for tool result messages (required by Ollama API)
    /// This tells the model which function's result is being returned
    #[serde(default)]
    pub tool_name: Option<String>,
}
32
33impl ChatMessage {
34    /// Create a user message
35    pub fn user(content: impl Into<String>) -> Self {
36        Self::new(MessageRole::User, content.into())
37    }
38
39    /// Create an assistant message
40    pub fn assistant(content: impl Into<String>) -> Self {
41        Self::new(MessageRole::Assistant, content.into())
42    }
43
44    /// Create a system message
45    pub fn system(content: impl Into<String>) -> Self {
46        Self::new(MessageRole::System, content.into())
47    }
48
49    /// Create a tool result message
50    pub fn tool(tool_call_id: impl Into<String>, tool_name: impl Into<String>, content: impl Into<String>) -> Self {
51        Self {
52            role: MessageRole::Tool,
53            content: content.into(),
54            timestamp: chrono::Local::now(),
55            actions: Vec::new(),
56            thinking: None,
57            images: None,
58            tool_calls: None,
59            tool_call_id: Some(tool_call_id.into()),
60            tool_name: Some(tool_name.into()),
61        }
62    }
63
64    /// Base constructor with role and content
65    fn new(role: MessageRole, content: String) -> Self {
66        Self {
67            role,
68            content,
69            timestamp: chrono::Local::now(),
70            actions: Vec::new(),
71            thinking: None,
72            images: None,
73            tool_calls: None,
74            tool_call_id: None,
75            tool_name: None,
76        }
77    }
78
79    /// Builder: attach images
80    pub fn with_images(mut self, images: Vec<String>) -> Self {
81        self.images = Some(images);
82        self
83    }
84
85    /// Builder: attach tool calls
86    pub fn with_tool_calls(mut self, tool_calls: Vec<crate::models::tool_call::ToolCall>) -> Self {
87        self.tool_calls = if tool_calls.is_empty() { None } else { Some(tool_calls) };
88        self
89    }
90
91    /// Extract thinking blocks from message content
92    /// Returns (thinking_content, answer_content)
93    ///
94    /// Safety: `str::find()` returns byte offsets. The markers "Thinking..." and
95    /// "...done thinking." are pure ASCII, so adding their `.len()` to the byte
96    /// offset always lands on a valid UTF-8 char boundary.
97    pub fn extract_thinking(text: &str) -> (Option<String>, String) {
98        // Check if the text contains thinking blocks
99        if !text.contains("Thinking...") {
100            return (None, text.to_string());
101        }
102
103        // Find thinking block boundaries
104        if let Some(thinking_start) = text.find("Thinking...")
105            && let Some(thinking_end) = text.find("...done thinking.") {
106                // Extract thinking content (everything between markers)
107                let thinking_content_start = thinking_start + "Thinking...".len();
108                let thinking_text = text[thinking_content_start..thinking_end].trim().to_string();
109
110                // Extract answer (everything after thinking block)
111                let answer_start = thinking_end + "...done thinking.".len();
112                let answer_text = text[answer_start..].trim().to_string();
113
114                return (Some(thinking_text), answer_text);
115            }
116
117        // If we found "Thinking..." but not the end marker, treat it all as thinking in progress
118        if let Some(thinking_start) = text.find("Thinking...") {
119            let thinking_content_start = thinking_start + "Thinking...".len();
120            let thinking_text = text[thinking_content_start..].trim().to_string();
121            return (Some(thinking_text), String::new());
122        }
123
124        (None, text.to_string())
125    }
126}
127
128#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
129pub enum MessageRole {
130    User,
131    Assistant,
132    System,
133    /// Tool result message (OpenAI-compatible format for function calling)
134    Tool,
135}
136
/// Response from a model.
///
/// Plain in-memory result type (not serialized, unlike [`ChatMessage`]).
#[derive(Debug, Clone)]
pub struct ModelResponse {
    /// The actual response text
    pub content: String,
    /// Usage statistics if available
    pub usage: Option<TokenUsage>,
    /// Model that generated the response
    pub model_name: String,
    /// Thinking/reasoning content (for models that expose their thought process)
    pub thinking: Option<String>,
    /// Tool calls from the model (Ollama native function calling)
    pub tool_calls: Option<Vec<crate::models::tool_call::ToolCall>>,
}
151
/// Token usage statistics.
///
/// All fields are plain `usize`, so the struct additionally derives
/// `Copy`, `PartialEq`, `Eq`, `Hash`, and `Default` (backward-compatible;
/// existing `.clone()` call sites keep working).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)]
pub struct TokenUsage {
    /// Tokens consumed by the prompt/context.
    pub prompt_tokens: usize,
    /// Tokens produced in the completion.
    pub completion_tokens: usize,
    /// Sum reported by the backend (prompt + completion).
    pub total_tokens: usize,
}
159
/// Stream callback type for real-time response streaming.
///
/// Invoked with each text chunk as it arrives; `Arc` + `Send + Sync` so the
/// same callback can be shared across threads/tasks.
pub type StreamCallback = Arc<dyn Fn(&str) + Send + Sync>;
162
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_message_role_equality() {
        // Identical variants compare equal; distinct variants do not.
        assert_eq!(MessageRole::User, MessageRole::User, "User roles should be equal");
        assert_ne!(
            MessageRole::User,
            MessageRole::Assistant,
            "Different roles should not be equal"
        );
    }

    #[test]
    fn test_chat_message_constructors() {
        // Each convenience constructor sets the matching role.
        let msg = ChatMessage::user("Hello!");
        assert!(matches!(msg.role, MessageRole::User));
        assert_eq!(msg.content, "Hello!");
        assert!(msg.tool_calls.is_none());

        assert!(matches!(ChatMessage::assistant("Hi there").role, MessageRole::Assistant));
        assert!(matches!(ChatMessage::system("You are helpful").role, MessageRole::System));

        // The tool constructor must record both the call id and the tool name.
        let result = ChatMessage::tool("call_1", "read_file", "file contents");
        assert!(matches!(result.role, MessageRole::Tool));
        assert_eq!(result.tool_call_id.as_deref(), Some("call_1"));
        assert_eq!(result.tool_name.as_deref(), Some("read_file"));
    }

    #[test]
    fn test_chat_message_builders() {
        let images = vec!["base64data".to_string()];
        let msg = ChatMessage::user("test").with_images(images.clone());
        assert_eq!(msg.images, Some(images));
    }

    #[test]
    fn test_token_usage_structure() {
        let usage = TokenUsage {
            prompt_tokens: 100,
            completion_tokens: 50,
            total_tokens: 150,
        };
        // Check all three counters in one shot.
        assert_eq!(
            (usage.prompt_tokens, usage.completion_tokens, usage.total_tokens),
            (100, 50, 150)
        );
    }

    #[test]
    fn test_model_response_creation() {
        let response = ModelResponse {
            content: "Hello, world!".to_string(),
            usage: Some(TokenUsage {
                prompt_tokens: 100,
                completion_tokens: 50,
                total_tokens: 150,
            }),
            model_name: "ollama/tinyllama".to_string(),
            thinking: None,
            tool_calls: None,
        };

        assert_eq!(response.content, "Hello, world!");
        assert_eq!(response.model_name, "ollama/tinyllama");
        assert!(response.tool_calls.is_none());
        // Move the usage out once and verify it carried through intact.
        let usage = response.usage.expect("usage should be present");
        assert_eq!(usage.total_tokens, 150);
    }
}