//! mermaid_cli/models/types.rs
//!
//! Core chat/message data types shared across the CLI: `ChatMessage`,
//! `MessageRole`, `ModelResponse`, `TokenUsage`, and the streaming callback
//! alias.
1use crate::agents::ActionDisplay;
2use serde::{Deserialize, Serialize};
3use std::sync::Arc;
4
5/// Represents a chat message
/// Represents a chat message
///
/// Serializable so conversations can be persisted; the optional fields carry
/// `#[serde(default)]` so older serialized data without them still
/// deserializes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatMessage {
    /// Who produced the message (user / assistant / system / tool).
    pub role: MessageRole,
    /// The message text.
    pub content: String,
    /// Local wall-clock time the message was created.
    pub timestamp: chrono::DateTime<chrono::Local>,
    /// Actions performed during this message (for display purposes)
    #[serde(default)]
    pub actions: Vec<ActionDisplay>,
    /// Thinking/reasoning content (for models that expose their thought process)
    #[serde(default)]
    pub thinking: Option<String>,
    /// Base64-encoded images/PDFs for multimodal models
    #[serde(default)]
    pub images: Option<Vec<String>>,
    /// Tool calls from the model (Ollama native function calling)
    #[serde(default)]
    pub tool_calls: Option<Vec<crate::models::tool_call::ToolCall>>,
    /// Tool call ID for tool result messages (OpenAI-compatible format)
    /// This links the tool result back to the original tool_call from the assistant
    #[serde(default)]
    pub tool_call_id: Option<String>,
    /// Tool name for tool result messages (required by Ollama API)
    /// This tells the model which function's result is being returned
    #[serde(default)]
    pub tool_name: Option<String>,
}
32
33impl ChatMessage {
34    /// Create a user message
35    pub fn user(content: impl Into<String>) -> Self {
36        Self::new(MessageRole::User, content.into())
37    }
38
39    /// Create an assistant message
40    pub fn assistant(content: impl Into<String>) -> Self {
41        Self::new(MessageRole::Assistant, content.into())
42    }
43
44    /// Create a system message
45    pub fn system(content: impl Into<String>) -> Self {
46        Self::new(MessageRole::System, content.into())
47    }
48
49    /// Create a tool result message
50    pub fn tool(
51        tool_call_id: impl Into<String>,
52        tool_name: impl Into<String>,
53        content: impl Into<String>,
54    ) -> Self {
55        Self {
56            role: MessageRole::Tool,
57            content: content.into(),
58            timestamp: chrono::Local::now(),
59            actions: Vec::new(),
60            thinking: None,
61            images: None,
62            tool_calls: None,
63            tool_call_id: Some(tool_call_id.into()),
64            tool_name: Some(tool_name.into()),
65        }
66    }
67
68    /// Base constructor with role and content
69    fn new(role: MessageRole, content: String) -> Self {
70        Self {
71            role,
72            content,
73            timestamp: chrono::Local::now(),
74            actions: Vec::new(),
75            thinking: None,
76            images: None,
77            tool_calls: None,
78            tool_call_id: None,
79            tool_name: None,
80        }
81    }
82
83    /// Builder: attach images
84    pub fn with_images(mut self, images: Vec<String>) -> Self {
85        self.images = Some(images);
86        self
87    }
88
89    /// Builder: attach tool calls
90    pub fn with_tool_calls(mut self, tool_calls: Vec<crate::models::tool_call::ToolCall>) -> Self {
91        self.tool_calls = if tool_calls.is_empty() {
92            None
93        } else {
94            Some(tool_calls)
95        };
96        self
97    }
98
99    /// Extract thinking blocks from message content
100    /// Returns (thinking_content, answer_content)
101    ///
102    /// Safety: `str::find()` returns byte offsets. The markers "Thinking..." and
103    /// "...done thinking." are pure ASCII, so adding their `.len()` to the byte
104    /// offset always lands on a valid UTF-8 char boundary.
105    pub fn extract_thinking(text: &str) -> (Option<String>, String) {
106        // Check if the text contains thinking blocks
107        if !text.contains("Thinking...") {
108            return (None, text.to_string());
109        }
110
111        // Find thinking block boundaries
112        if let Some(thinking_start) = text.find("Thinking...")
113            && let Some(thinking_end) = text.find("...done thinking.")
114        {
115            // Extract thinking content (everything between markers)
116            let thinking_content_start = thinking_start + "Thinking...".len();
117            let thinking_text = text[thinking_content_start..thinking_end]
118                .trim()
119                .to_string();
120
121            // Extract answer (everything after thinking block)
122            let answer_start = thinking_end + "...done thinking.".len();
123            let answer_text = text[answer_start..].trim().to_string();
124
125            return (Some(thinking_text), answer_text);
126        }
127
128        // If we found "Thinking..." but not the end marker, treat it all as thinking in progress
129        if let Some(thinking_start) = text.find("Thinking...") {
130            let thinking_content_start = thinking_start + "Thinking...".len();
131            let thinking_text = text[thinking_content_start..].trim().to_string();
132            return (Some(thinking_text), String::new());
133        }
134
135        (None, text.to_string())
136    }
137}
138
139#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
140pub enum MessageRole {
141    User,
142    Assistant,
143    System,
144    /// Tool result message (OpenAI-compatible format for function calling)
145    Tool,
146}
147
148/// Response from a model
/// Response from a model
///
/// Aggregates the generated text plus optional metadata (usage, thinking,
/// tool calls) returned by a single completion.
#[derive(Debug, Clone)]
pub struct ModelResponse {
    /// The actual response text
    pub content: String,
    /// Usage statistics if available
    pub usage: Option<TokenUsage>,
    /// Model that generated the response
    pub model_name: String,
    /// Thinking/reasoning content (for models that expose their thought process)
    pub thinking: Option<String>,
    /// Tool calls from the model (Ollama native function calling)
    pub tool_calls: Option<Vec<crate::models::tool_call::ToolCall>>,
}
162
163/// Token usage statistics
/// Token usage statistics
#[derive(Debug, Clone)]
pub struct TokenUsage {
    /// Tokens consumed by the prompt/input.
    pub prompt_tokens: usize,
    /// Tokens generated in the completion/output.
    pub completion_tokens: usize,
    /// Sum of prompt and completion tokens.
    pub total_tokens: usize,
}
170
/// Stream callback type for real-time response streaming
///
/// Invoked with each text chunk as it arrives. Wrapped in `Arc` with
/// `Send + Sync` bounds so the same callback can be shared across threads.
pub type StreamCallback = Arc<dyn Fn(&str) + Send + Sync>;
173
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_message_role_equality() {
        // Identical variants compare equal; distinct variants do not.
        assert_eq!(
            MessageRole::User,
            MessageRole::User,
            "User roles should be equal"
        );
        assert_ne!(
            MessageRole::User,
            MessageRole::Assistant,
            "Different roles should not be equal"
        );
    }

    #[test]
    fn test_chat_message_constructors() {
        // Each convenience constructor sets the matching role.
        let msg = ChatMessage::user("Hello!");
        assert_eq!(msg.role, MessageRole::User);
        assert_eq!(msg.content, "Hello!");
        assert!(msg.tool_calls.is_none());

        assert_eq!(ChatMessage::assistant("Hi there").role, MessageRole::Assistant);
        assert_eq!(ChatMessage::system("You are helpful").role, MessageRole::System);

        // The tool constructor also records the call id and function name.
        let result = ChatMessage::tool("call_1", "read_file", "file contents");
        assert_eq!(result.role, MessageRole::Tool);
        assert_eq!(result.tool_call_id.as_deref(), Some("call_1"));
        assert_eq!(result.tool_name.as_deref(), Some("read_file"));
    }

    #[test]
    fn test_chat_message_builders() {
        // with_images stores the provided attachments verbatim.
        let attachments = vec!["base64data".to_string()];
        let msg = ChatMessage::user("test").with_images(attachments.clone());
        assert_eq!(msg.images, Some(attachments));
    }

    #[test]
    fn test_token_usage_structure() {
        let usage = TokenUsage {
            prompt_tokens: 100,
            completion_tokens: 50,
            total_tokens: 150,
        };

        // All three counters round-trip unchanged.
        assert_eq!(
            (usage.prompt_tokens, usage.completion_tokens, usage.total_tokens),
            (100, 50, 150)
        );
    }

    #[test]
    fn test_model_response_creation() {
        let response = ModelResponse {
            content: "Hello, world!".to_string(),
            usage: Some(TokenUsage {
                prompt_tokens: 100,
                completion_tokens: 50,
                total_tokens: 150,
            }),
            model_name: "ollama/tinyllama".to_string(),
            thinking: None,
            tool_calls: None,
        };

        assert_eq!(response.content, "Hello, world!");
        assert_eq!(response.model_name, "ollama/tinyllama");
        assert!(response.tool_calls.is_none());
        assert_eq!(response.usage.expect("usage should be set").total_tokens, 150);
    }
}