//! aether_core/events/agent_message.rs
//!
//! Events streamed from the agent to the user/UI, plus the projection of
//! those events into the reduced `SubAgentEvent` set.

1use acp_utils::notifications::{
2    SubAgentEvent, SubAgentToolCallUpdate, SubAgentToolError, SubAgentToolRequest, SubAgentToolResult,
3};
4use llm::{ToolCallError, ToolCallRequest, ToolCallResult};
5use mcp_utils::display_meta::ToolResultMeta;
6use serde::{Deserialize, Serialize};
7
/// Message from the agent to the user.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum AgentMessage {
    /// A chunk of assistant-visible text; `is_complete` marks the final
    /// chunk of the message identified by `message_id`.
    Text {
        message_id: String,
        chunk: String,
        is_complete: bool,
        model_name: String,
    },

    /// A chunk of model reasoning/thought output, streamed like `Text`.
    Thought {
        message_id: String,
        chunk: String,
        is_complete: bool,
        model_name: String,
    },

    /// The model issued a tool call.
    ToolCall {
        request: ToolCallRequest,
        model_name: String,
    },

    /// An incremental chunk for an in-flight tool call (e.g. streamed
    /// arguments or output), keyed by `tool_call_id`.
    ToolCallUpdate {
        tool_call_id: String,
        chunk: String,
        model_name: String,
    },

    /// Progress report for a running tool call.
    ToolProgress {
        request: ToolCallRequest,
        // Current progress value; `total`, when known, gives the end value.
        progress: f64,
        total: Option<f64>,
        message: Option<String>,
    },

    /// A tool call finished successfully.
    ToolResult {
        result: ToolCallResult,
        // Optional display metadata (title/value) for UI rendering.
        result_meta: Option<ToolResultMeta>,
        model_name: String,
    },

    /// A tool call failed.
    ToolError {
        error: ToolCallError,
        model_name: String,
    },

    /// A non-tool error surfaced to the user.
    Error {
        message: String,
    },

    /// The current operation was cancelled.
    Cancelled {
        message: String,
    },

    /// Context compaction has been triggered.
    ContextCompactionStarted {
        message_count: usize,
    },

    /// Context was compacted to reduce token usage.
    ContextCompactionResult {
        summary: String,
        messages_removed: usize,
    },

    /// Context usage update for UI display.
    ContextUsageUpdate {
        /// Current usage ratio (0.0 - 1.0), if context window is known.
        usage_ratio: Option<f64>,
        /// Maximum context limit, if known.
        context_limit: Option<u32>,
        /// Input tokens on the most recent API call (the current context size).
        input_tokens: u32,
        /// Output tokens on the most recent API call.
        output_tokens: u32,
        /// Prompt tokens served from cache on the most recent API call.
        cache_read_tokens: Option<u32>,
        /// Prompt tokens written to cache on the most recent API call.
        cache_creation_tokens: Option<u32>,
        /// Reasoning tokens spent on the most recent API call.
        reasoning_tokens: Option<u32>,
        /// Cumulative input tokens since the agent started.
        total_input_tokens: u64,
        /// Cumulative output tokens since the agent started.
        total_output_tokens: u64,
        /// Cumulative cache-read tokens since the agent started.
        total_cache_read_tokens: u64,
        /// Cumulative cache-creation tokens since the agent started.
        total_cache_creation_tokens: u64,
        /// Cumulative reasoning tokens since the agent started.
        total_reasoning_tokens: u64,
    },

    /// Agent is auto-continuing because LLM stopped with a resumable stop reason.
    AutoContinue {
        /// Current attempt number (1-indexed).
        attempt: u32,
        /// Maximum allowed attempts.
        max_attempts: u32,
    },

    /// The model was successfully switched.
    ModelSwitched {
        previous: String,
        new: String,
    },

    /// The agent context was cleared and reset to its blank state.
    ContextCleared,

    /// The agent finished the current turn.
    Done,
}
120
121impl From<&AgentMessage> for SubAgentEvent {
122    fn from(msg: &AgentMessage) -> Self {
123        match msg {
124            AgentMessage::ToolCall { request, .. } => SubAgentEvent::ToolCall {
125                request: SubAgentToolRequest {
126                    id: request.id.clone(),
127                    name: request.name.clone(),
128                    arguments: request.arguments.clone(),
129                },
130            },
131            AgentMessage::ToolCallUpdate { tool_call_id, chunk, .. } => SubAgentEvent::ToolCallUpdate {
132                update: SubAgentToolCallUpdate { id: tool_call_id.clone(), chunk: chunk.clone() },
133            },
134            AgentMessage::ToolResult { result, result_meta, .. } => SubAgentEvent::ToolResult {
135                result: SubAgentToolResult {
136                    id: result.id.clone(),
137                    name: result.name.clone(),
138                    result_meta: result_meta.clone(),
139                },
140            },
141            AgentMessage::ToolError { error, .. } => {
142                SubAgentEvent::ToolError { error: SubAgentToolError { id: error.id.clone(), name: error.name.clone() } }
143            }
144            AgentMessage::Done => SubAgentEvent::Done,
145            _ => SubAgentEvent::Other,
146        }
147    }
148}
149
150impl AgentMessage {
151    pub fn text(message_id: &str, chunk: &str, is_complete: bool, model_name: &str) -> Self {
152        AgentMessage::Text {
153            message_id: message_id.to_string(),
154            chunk: chunk.to_string(),
155            is_complete,
156            model_name: model_name.to_string(),
157        }
158    }
159
160    pub fn thought(message_id: &str, chunk: &str, is_complete: bool, model_name: &str) -> Self {
161        AgentMessage::Thought {
162            message_id: message_id.to_string(),
163            chunk: chunk.to_string(),
164            is_complete,
165            model_name: model_name.to_string(),
166        }
167    }
168}
169
#[cfg(test)]
mod tests {
    use super::AgentMessage;
    use acp_utils::notifications::SubAgentEvent;
    use llm::ToolCallResult;
    use mcp_utils::display_meta::ToolDisplayMeta;

    // Variants carrying only plain strings must survive a JSON round-trip.
    #[test]
    fn test_model_switched_serde_roundtrip() {
        let msg = AgentMessage::ModelSwitched {
            previous: "anthropic:claude-3.5-sonnet".to_string(),
            new: "ollama:llama3.2".to_string(),
        };
        let json = serde_json::to_string(&msg).unwrap();
        let parsed: AgentMessage = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed, msg);
    }

    // Streaming (incomplete) thought chunks round-trip through JSON.
    #[test]
    fn test_thought_serde_roundtrip() {
        let msg = AgentMessage::Thought {
            message_id: "msg_1".to_string(),
            chunk: "thinking".to_string(),
            is_complete: false,
            model_name: "test-model".to_string(),
        };
        let json = serde_json::to_string(&msg).unwrap();
        let parsed: AgentMessage = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed, msg);
    }

    // Final (complete) thought chunks round-trip through JSON as well.
    #[test]
    fn test_thought_complete_serde_roundtrip() {
        let msg = AgentMessage::Thought {
            message_id: "msg_1".to_string(),
            chunk: "full reasoning".to_string(),
            is_complete: true,
            model_name: "test-model".to_string(),
        };
        let json = serde_json::to_string(&msg).unwrap();
        let parsed: AgentMessage = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed, msg);
    }

    // ToolResult must serialize its display metadata (title/value) so the
    // UI can render it, and must round-trip losslessly.
    #[test]
    fn test_tool_result_serializes_result_meta() {
        let msg = AgentMessage::ToolResult {
            result: ToolCallResult {
                id: "call_1".to_string(),
                name: "coding__read_file".to_string(),
                arguments: r#"{"filePath":"Cargo.toml"}"#.to_string(),
                result: "ok".to_string(),
            },
            result_meta: Some(ToolDisplayMeta::new("Read file", "Cargo.toml, 156 lines").into()),
            model_name: "test-model".to_string(),
        };

        let json = serde_json::to_value(&msg).unwrap();
        // Externally-tagged enum representation: variant name is the key.
        let tool_result = &json["ToolResult"];
        assert_eq!(tool_result["result_meta"]["display"]["title"], "Read file");
        assert_eq!(tool_result["result_meta"]["display"]["value"], "Cargo.toml, 156 lines");

        let parsed: AgentMessage = serde_json::from_value(json).unwrap();
        assert_eq!(parsed, msg);
    }

    // The SubAgentEvent projection must carry the display metadata through,
    // not just the id/name of the tool call.
    #[test]
    fn test_sub_agent_tool_result_includes_display_fields() {
        let msg = AgentMessage::ToolResult {
            result: ToolCallResult {
                id: "call_1".to_string(),
                name: "coding__read_file".to_string(),
                arguments: r#"{"filePath":"Cargo.toml"}"#.to_string(),
                result: "ok".to_string(),
            },
            result_meta: Some(ToolDisplayMeta::new("Read file", "Cargo.toml, 156 lines").into()),
            model_name: "test-model".to_string(),
        };

        let event: SubAgentEvent = (&msg).into();
        match event {
            SubAgentEvent::ToolResult { result } => {
                assert_eq!(result.id, "call_1");
                assert_eq!(result.name, "coding__read_file");
                let result_meta = result.result_meta.expect("result_meta should be present");
                assert_eq!(result_meta.display.title, "Read file");
                assert_eq!(result_meta.display.value, "Cargo.toml, 156 lines");
            }
            other => panic!("Expected ToolResult, got {other:?}"),
        }
    }

    // The SubAgentEvent projection of a ToolCallUpdate keeps both the id
    // and the streamed chunk.
    #[test]
    fn test_sub_agent_tool_call_update_includes_updated_fields() {
        let msg = AgentMessage::ToolCallUpdate {
            tool_call_id: "call_1".to_string(),
            chunk: r#"{"filePath":"Cargo.toml"}"#.to_string(),
            model_name: "test-model".to_string(),
        };

        let event: SubAgentEvent = (&msg).into();
        match event {
            SubAgentEvent::ToolCallUpdate { update } => {
                assert_eq!(update.id, "call_1");
                assert_eq!(update.chunk, r#"{"filePath":"Cargo.toml"}"#);
            }
            other => panic!("Expected ToolCallUpdate, got {other:?}"),
        }
    }
}