1use aether_core::core::Prompt;
2use aether_core::events::{AgentMessage, UserMessage};
3use std::io;
4use std::process::ExitCode;
5use tokio::sync::mpsc;
6
7use super::error::CliError;
8use super::{OutputFormat, RunConfig};
9use crate::runtime::RuntimeBuilder;
10
11pub async fn run(config: RunConfig) -> Result<ExitCode, CliError> {
12 setup_tracing(config.verbose, &config.output);
13
14 let agent = RuntimeBuilder::from_spec(config.cwd.clone(), config.spec)
15 .mcp_config_opt(config.mcp_config)
16 .build(config.system_prompt.as_deref().map(Prompt::text), None)
17 .await?;
18
19 agent
20 .agent_tx
21 .send(UserMessage::text(&config.prompt))
22 .await
23 .map_err(|e| CliError::AgentError(format!("Failed to send prompt: {e}")))?;
24
25 Ok(stream_output(agent.agent_rx, &config.output).await)
26}
27
28async fn stream_output(mut rx: mpsc::Receiver<AgentMessage>, format: &OutputFormat) -> ExitCode {
29 while let Some(msg) = rx.recv().await {
30 match format {
31 OutputFormat::Text => emit_text(&msg),
32 OutputFormat::Pretty | OutputFormat::Json => emit_event(&msg),
33 }
34 if matches!(msg, AgentMessage::Done) {
35 break;
36 }
37 }
38 ExitCode::SUCCESS
39}
40
41fn format_text(msg: &AgentMessage) -> Option<String> {
42 match msg {
43 AgentMessage::Text { chunk, is_complete: true, .. } => Some(chunk.clone()),
44
45 AgentMessage::Thought { chunk, is_complete: true, .. } => Some(format!("Thought: {chunk}")),
46
47 AgentMessage::ToolCall { request, .. } => Some(format!("Tool call: {}({})", request.name, request.arguments)),
48
49 AgentMessage::ToolResult { result, .. } => Some(format!("Tool result [{}]: {}", result.name, result.result)),
50
51 AgentMessage::ToolError { error, .. } => Some(format!("Tool error [{}]: {}", error.name, error.error)),
52
53 AgentMessage::Error { message } => Some(format!("Error: {message}")),
54
55 AgentMessage::Cancelled { message } => Some(format!("Cancelled: {message}")),
56
57 AgentMessage::AutoContinue { attempt, max_attempts } => {
58 Some(format!("Continuing ({attempt}/{max_attempts})..."))
59 }
60
61 AgentMessage::ModelSwitched { previous, new } => Some(format!("Model switched: {previous} -> {new}")),
62
63 _ => None,
64 }
65}
66
67fn emit_text(msg: &AgentMessage) {
68 if let Some(text) = format_text(msg) {
69 if matches!(msg, AgentMessage::Error { .. }) {
70 eprintln!("{text}");
71 } else {
72 println!("{text}");
73 }
74 }
75}
76
/// Emit one agent message as a `tracing` event for the Pretty/Json output
/// formats. All events use target "agent" so the agent-only layer installed
/// by `setup_tracing` picks them up; streaming partials (incomplete chunks,
/// `ToolCallUpdate`) and terminal markers like `Done` are deliberately dropped.
fn emit_event(msg: &AgentMessage) {
    match msg {
        // Only complete text/thought chunks are emitted; partial stream
        // chunks fall through to the catch-all below.
        AgentMessage::Text { chunk, is_complete: true, .. } => tracing::info!(target: "agent", "{chunk}"),

        AgentMessage::Thought { chunk, is_complete: true, .. } => tracing::info!(target: "agent", thought = %chunk),

        AgentMessage::ToolCall { request, .. } => {
            tracing::info!(target: "agent", tool = %request.name, arguments = %request.arguments);
        }

        AgentMessage::ToolResult { result, .. } => {
            tracing::info!(target: "agent", tool = %result.name, result = %result.result);
        }

        // Tool errors are warnings, agent-level errors (below) are errors.
        AgentMessage::ToolError { error, .. } => {
            tracing::warn!(target: "agent", tool = %error.name, error = %error.error);
        }

        AgentMessage::Error { message } => tracing::error!(target: "agent", "{message}"),

        AgentMessage::Cancelled { message } => {
            tracing::info!(target: "agent", cancelled = %message);
        }

        AgentMessage::AutoContinue { attempt, max_attempts } => {
            tracing::info!(target: "agent", "Continuing ({attempt}/{max_attempts})...");
        }

        AgentMessage::ModelSwitched { previous, new } => {
            tracing::info!(target: "agent", "Model switched: {previous} -> {new}");
        }

        // Incomplete chunks, ToolCallUpdate, Done, etc.: intentionally silent.
        _ => {}
    }
}
112
113fn setup_tracing(verbose: bool, format: &OutputFormat) {
114 use tracing_subscriber::Layer;
115 use tracing_subscriber::filter::{self, EnvFilter};
116 use tracing_subscriber::fmt;
117 use tracing_subscriber::layer::SubscriberExt;
118 use tracing_subscriber::util::SubscriberInitExt;
119
120 let diag_filter = if verbose { EnvFilter::new("debug,agent=off") } else { EnvFilter::new("error,agent=off") };
121
122 let diag_layer = fmt::layer().with_writer(io::stderr).with_filter(diag_filter);
123
124 let agent_filter = filter::filter_fn(|meta| meta.target().starts_with("agent"));
125
126 match format {
127 OutputFormat::Text => {
128 if verbose {
129 tracing_subscriber::registry().with(diag_layer).init();
130 } else {
131 tracing_subscriber::registry().init();
133 }
134 }
135 OutputFormat::Pretty => {
136 let agent_layer = fmt::layer().with_writer(io::stdout).pretty().with_filter(agent_filter);
137 tracing_subscriber::registry().with(diag_layer).with(agent_layer).init();
138 }
139 OutputFormat::Json => {
140 let agent_layer = fmt::layer().with_writer(io::stdout).json().with_filter(agent_filter);
141 tracing_subscriber::registry().with(diag_layer).with(agent_layer).init();
142 }
143 }
144}
145
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::{Arc, Mutex};

    use tracing_subscriber::Layer;
    use tracing_subscriber::fmt;
    use tracing_subscriber::layer::SubscriberExt;

    /// Run `f` under a temporary tracing subscriber that captures
    /// agent-targeted events into an in-memory buffer, returning everything
    /// written as a UTF-8 string.
    ///
    /// The layer mirrors `setup_tracing`'s agent filter but strips ANSI
    /// codes, level, and target so assertions can match on message text
    /// alone. The subscriber is scoped via `with_default` (not installed
    /// globally), so concurrent tests do not interfere with each other.
    fn with_test_subscriber<F: FnOnce()>(f: F) -> String {
        let buf = Arc::new(Mutex::new(Vec::new()));
        let buf_clone = Arc::clone(&buf);

        // `fmt::layer().with_writer` takes a writer factory; each call hands
        // out a clone that appends to the shared buffer.
        let writer = move || -> TestWriter { TestWriter { buf: Arc::clone(&buf_clone) } };

        let layer = fmt::layer()
            .with_writer(writer)
            .with_ansi(false)
            .with_level(false)
            .with_target(false)
            .with_timer(fmt::time::uptime())
            .with_filter(tracing_subscriber::filter::filter_fn(|meta| meta.target().starts_with("agent")));

        let subscriber = tracing_subscriber::registry().with(layer);

        tracing::subscriber::with_default(subscriber, f);

        let bytes = buf.lock().unwrap();
        String::from_utf8(bytes.clone()).unwrap()
    }

    /// `io::Write` adapter over a shared byte buffer, used as the capture
    /// sink for the test tracing layer above.
    #[derive(Clone)]
    struct TestWriter {
        buf: Arc<Mutex<Vec<u8>>>,
    }

    impl io::Write for TestWriter {
        fn write(&mut self, data: &[u8]) -> io::Result<usize> {
            self.buf.lock().unwrap().extend_from_slice(data);
            Ok(data.len())
        }

        fn flush(&mut self) -> io::Result<()> {
            Ok(())
        }
    }

    // --- emit_event: which messages produce tracing output --------------

    #[test]
    fn emit_event_emits_complete_text() {
        let output = with_test_subscriber(|| {
            emit_event(&AgentMessage::text("id", "hello", true, "model"));
        });
        assert!(output.contains("hello"), "expected 'hello' in: {output}");
    }

    #[test]
    fn emit_event_skips_incomplete_text() {
        let output = with_test_subscriber(|| {
            emit_event(&AgentMessage::text("id", "hello", false, "model"));
        });
        assert!(output.is_empty(), "expected empty output, got: {output}");
    }

    #[test]
    fn emit_event_emits_complete_thought() {
        let output = with_test_subscriber(|| {
            emit_event(&AgentMessage::thought("id", "deep thinking", true, "model"));
        });
        assert!(output.contains("deep thinking"), "expected 'deep thinking' in: {output}");
    }

    #[test]
    fn emit_event_skips_incomplete_thought() {
        let output = with_test_subscriber(|| {
            emit_event(&AgentMessage::thought("id", "partial", false, "model"));
        });
        assert!(output.is_empty(), "expected empty output, got: {output}");
    }

    #[test]
    fn emit_event_emits_tool_call() {
        let msg = AgentMessage::ToolCall {
            request: llm::ToolCallRequest {
                id: "tc1".to_string(),
                name: "bash".to_string(),
                arguments: "{}".to_string(),
            },
            model_name: "test".to_string(),
        };
        let output = with_test_subscriber(|| {
            emit_event(&msg);
        });
        assert!(output.contains("bash"), "expected 'bash' in: {output}");
    }

    #[test]
    fn emit_event_skips_tool_call_updates() {
        // Streaming argument deltas must not be emitted as events.
        let msg = AgentMessage::ToolCallUpdate {
            tool_call_id: "tc1".to_string(),
            chunk: "{\"partial".to_string(),
            model_name: "test".to_string(),
        };
        let output = with_test_subscriber(|| {
            emit_event(&msg);
        });
        assert!(output.is_empty(), "expected empty output, got: {output}");
    }

    #[test]
    fn emit_event_emits_tool_result() {
        let msg = AgentMessage::ToolResult {
            result: llm::ToolCallResult {
                id: "tc1".to_string(),
                name: "bash".to_string(),
                arguments: "{}".to_string(),
                result: "ok".to_string(),
            },
            result_meta: None,
            model_name: "test".to_string(),
        };
        let output = with_test_subscriber(|| {
            emit_event(&msg);
        });
        assert!(output.contains("bash"), "expected 'bash' in: {output}");
        assert!(output.contains("ok"), "expected 'ok' in: {output}");
    }

    #[test]
    fn emit_event_emits_error() {
        let msg = AgentMessage::Error { message: "something broke".to_string() };
        let output = with_test_subscriber(|| {
            emit_event(&msg);
        });
        assert!(output.contains("something broke"), "expected 'something broke' in: {output}");
    }

    #[test]
    fn emit_event_skips_done() {
        let output = with_test_subscriber(|| {
            emit_event(&AgentMessage::Done);
        });
        assert!(output.is_empty(), "expected empty output, got: {output}");
    }

    // --- format_text: plain-text rendering (used by emit_text) ----------

    #[test]
    fn emit_text_formats_complete_text() {
        assert_eq!(format_text(&AgentMessage::text("id", "hello world", true, "m")), Some("hello world".to_string()));
    }

    #[test]
    fn emit_text_skips_incomplete_text() {
        assert_eq!(format_text(&AgentMessage::text("id", "partial", false, "m")), None);
    }

    #[test]
    fn emit_text_formats_complete_thought() {
        assert_eq!(
            format_text(&AgentMessage::thought("id", "reasoning here", true, "m")),
            Some("Thought: reasoning here".to_string())
        );
    }

    #[test]
    fn emit_text_skips_incomplete_thought() {
        assert_eq!(format_text(&AgentMessage::thought("id", "partial", false, "m")), None);
    }

    #[test]
    fn emit_text_formats_tool_call() {
        let msg = AgentMessage::ToolCall {
            request: llm::ToolCallRequest {
                id: "tc1".to_string(),
                name: "bash".to_string(),
                arguments: r#"{"cmd":"ls"}"#.to_string(),
            },
            model_name: "test".to_string(),
        };
        assert_eq!(format_text(&msg), Some(r#"Tool call: bash({"cmd":"ls"})"#.to_string()));
    }

    #[test]
    fn emit_text_skips_tool_call_updates() {
        let msg = AgentMessage::ToolCallUpdate {
            tool_call_id: "tc1".to_string(),
            chunk: "partial".to_string(),
            model_name: "test".to_string(),
        };
        assert_eq!(format_text(&msg), None);
    }

    #[test]
    fn emit_text_formats_tool_result() {
        let msg = AgentMessage::ToolResult {
            result: llm::ToolCallResult {
                id: "tc1".to_string(),
                name: "bash".to_string(),
                arguments: "{}".to_string(),
                result: "output".to_string(),
            },
            result_meta: None,
            model_name: "test".to_string(),
        };
        assert_eq!(format_text(&msg), Some("Tool result [bash]: output".to_string()));
    }

    #[test]
    fn emit_text_formats_tool_error() {
        let msg = AgentMessage::ToolError {
            error: llm::ToolCallError {
                id: "tc1".to_string(),
                name: "bash".to_string(),
                arguments: None,
                error: "not found".to_string(),
            },
            model_name: "test".to_string(),
        };
        assert_eq!(format_text(&msg), Some("Tool error [bash]: not found".to_string()));
    }

    #[test]
    fn emit_text_formats_error() {
        let msg = AgentMessage::Error { message: "boom".to_string() };
        assert_eq!(format_text(&msg), Some("Error: boom".to_string()));
    }

    #[test]
    fn emit_text_formats_cancelled() {
        let msg = AgentMessage::Cancelled { message: "user stopped".to_string() };
        assert_eq!(format_text(&msg), Some("Cancelled: user stopped".to_string()));
    }

    #[test]
    fn emit_text_formats_auto_continue() {
        let msg = AgentMessage::AutoContinue { attempt: 2, max_attempts: 5 };
        assert_eq!(format_text(&msg), Some("Continuing (2/5)...".to_string()));
    }

    #[test]
    fn emit_text_formats_model_switched() {
        let msg = AgentMessage::ModelSwitched { previous: "old-model".to_string(), new: "new-model".to_string() };
        assert_eq!(format_text(&msg), Some("Model switched: old-model -> new-model".to_string()));
    }

    #[test]
    fn emit_text_skips_done() {
        assert_eq!(format_text(&AgentMessage::Done), None);
    }
}