1use super::config::AgentConfig;
4use crate::agent::prompt::{build_system_prompt_with_context, build_user_message};
5use crate::agent::state::PersistedAgentContext;
6use crate::agent::tokens::ConversationManager;
7use crate::agent::{Agent, AgentExecution, AgentResult};
8use crate::error::{AgentError, Result};
9use crate::llm::{ChatOptions, LlmClient, LlmMessage};
10use crate::output::{
11 AgentEvent, AgentExecutionContext, AgentOutput, TokenUsage, ToolExecutionInfo,
12 ToolExecutionInfoBuilder, ToolExecutionStatus,
13};
14use crate::tools::{ToolExecutor, ToolRegistry};
15use crate::trajectory::{TrajectoryEntry, TrajectoryRecorder};
16use async_trait::async_trait;
17use std::path::Path;
18use std::sync::Arc;
19use std::time::Instant;
20
/// Core agent implementation: drives the LLM conversation loop, executes
/// tool calls requested by the model, and optionally records a trajectory.
pub struct AgentCore {
    /// Agent behaviour configuration (tool list, max steps, system prompt, ...).
    config: AgentConfig,
    /// Protocol-specific LLM backend, selected at construction time.
    llm_client: Arc<dyn LlmClient>,
    /// Executes tool calls against the configured tool set.
    tool_executor: ToolExecutor,
    /// Optional recorder for replayable execution traces.
    trajectory_recorder: Option<TrajectoryRecorder>,
    /// Full message history (system/user/assistant/tool messages).
    conversation_history: Vec<LlmMessage>,
    /// Sink for user-visible events, messages, and confirmation requests.
    output: Box<dyn AgentOutput>,
    #[allow(dead_code)]
    // NOTE(review): never read in this file; presumably reserved for UI
    // state — confirm before removing.
    current_task_displayed: bool,
    /// Per-task execution metadata (steps, token usage, timing).
    execution_context: Option<AgentExecutionContext>,
    /// Tracks token budget and performs conversation compression.
    conversation_manager: ConversationManager,
    /// Cancellation source; `cancel()` signals all registrations.
    abort_controller: crate::agent::AbortController,
    /// Cancellation listener cloned into each step's select loop.
    abort_registration: crate::agent::AbortRegistration,
}
38
39impl AgentCore {
40 pub async fn new_with_llm_config(
42 agent_config: AgentConfig,
43 llm_config: crate::config::ResolvedLlmConfig,
44 output: Box<dyn AgentOutput>,
45 abort_controller: Option<crate::agent::AbortController>,
46 ) -> Result<Self> {
47 let llm_client: Arc<dyn LlmClient> = match llm_config.protocol {
49 crate::config::Protocol::OpenAICompat => {
50 Arc::new(crate::llm::OpenAiClient::new(&llm_config)?)
51 }
52 crate::config::Protocol::Anthropic => {
53 Arc::new(crate::llm::AnthropicClient::new(&llm_config)?)
54 }
55 crate::config::Protocol::GoogleAI => {
56 return Err(AgentError::NotInitialized.into()); }
58 crate::config::Protocol::AzureOpenAI => {
59 Arc::new(crate::llm::OpenAiClient::new(&llm_config)?)
61 }
62 crate::config::Protocol::Custom(_) => {
63 return Err(AgentError::NotInitialized.into()); }
65 };
66
67 let tool_registry = crate::tools::ToolRegistry::default();
69 let tool_executor = tool_registry.create_executor(&agent_config.tools);
70
71 let max_tokens = llm_config.params.max_tokens.unwrap_or(8192);
73 let conversation_manager = ConversationManager::new(max_tokens, llm_client.clone());
74
75 let (abort_controller, abort_registration) = if let Some(controller) = abort_controller {
77 let registration = controller.subscribe();
78 (controller, registration)
79 } else {
80 crate::agent::AbortController::new()
82 };
83
84 Ok(Self {
85 config: agent_config,
86 llm_client,
87 tool_executor,
88 trajectory_recorder: None,
89 conversation_history: Vec::new(),
90 output,
91 current_task_displayed: false,
92 execution_context: None,
93 conversation_manager,
94 abort_controller,
95 abort_registration,
96 })
97 }
98
99 pub fn export_context_snapshot(&self) -> Result<PersistedAgentContext> {
101 Ok(PersistedAgentContext::new(
102 self.agent_type().to_string(),
103 Some(self.config.clone()),
104 self.conversation_history.clone(),
105 self.execution_context.clone(),
106 ))
107 }
108
109 pub fn export_context_json(&self) -> Result<String> {
111 let snap = self.export_context_snapshot()?;
112 snap.to_json()}
113
114 pub fn export_context_to_file<P: AsRef<Path>>(&self, path: P) -> Result<()> {
116 let snap = self.export_context_snapshot()?;
117 snap.to_file(path.as_ref())
118 }
119
120 pub fn restore_context_from_snapshot(&mut self, snapshot: PersistedAgentContext) -> Result<()> {
122 if let Some(cfg) = snapshot.config {
124 self.config = cfg;
125 }
126
127 self.conversation_history = snapshot.conversation_history;
129 self.execution_context = snapshot.execution_context;
130
131 Ok(())
134 }
135
136 pub fn restore_context_from_json(&mut self, json: &str) -> Result<()> {
138 let snapshot = PersistedAgentContext::from_json(json)?;
139 self.restore_context_from_snapshot(snapshot)
140 }
141
142 pub fn restore_context_from_file<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
144 let snapshot = PersistedAgentContext::from_file(path.as_ref())?;
145 self.restore_context_from_snapshot(snapshot)
146 }
147
    /// Replace the conversation history wholesale and drop any in-flight
    /// execution context (a new one is created on the next task).
    pub fn restore_from_history(&mut self, history: Vec<LlmMessage>) -> Result<()> {
        self.conversation_history = history;
        self.execution_context = None;
        Ok(())
    }
155
    /// Borrow the agent's configuration.
    pub fn config(&self) -> &AgentConfig {
        &self.config
    }
160
    /// Signal cancellation to any in-flight task; steps observe this via
    /// the abort registration and stop at the next checkpoint.
    pub fn cancel(&self) {
        self.abort_controller.cancel();
    }
165
166 pub fn set_abort_controller(&mut self, abort_controller: crate::agent::AbortController) {
168 self.abort_registration = abort_controller.subscribe();
169 self.abort_controller = abort_controller;
170 }
171
    /// Construct an agent with an explicit tool registry and output sink.
    ///
    /// Selects the concrete LLM client from `llm_config.protocol`, builds
    /// the tool executor restricted to `agent_config.tools`, and wires up
    /// cancellation (reusing the caller's controller when provided).
    ///
    /// # Errors
    /// Returns an error when the selected client fails to construct.
    /// NOTE(review): GoogleAI and Custom protocols fail with
    /// `AgentError::NotInitialized`, which reads more like "not supported";
    /// confirm whether a dedicated error variant exists.
    pub async fn new_with_output_and_registry(
        agent_config: AgentConfig,
        llm_config: crate::config::ResolvedLlmConfig,
        output: Box<dyn AgentOutput>,
        tool_registry: ToolRegistry,
        abort_controller: Option<crate::agent::AbortController>,
    ) -> Result<Self> {
        // Dispatch on wire protocol to pick the client implementation.
        let llm_client: Arc<dyn LlmClient> = match llm_config.protocol {
            crate::config::Protocol::OpenAICompat => {
                Arc::new(crate::llm::OpenAiClient::new(&llm_config)?)
            }
            crate::config::Protocol::Anthropic => {
                Arc::new(crate::llm::AnthropicClient::new(&llm_config)?)
            }
            crate::config::Protocol::GoogleAI => {
                // Not implemented yet.
                return Err(AgentError::NotInitialized.into());
            }
            crate::config::Protocol::AzureOpenAI => {
                // Azure speaks the OpenAI-compatible protocol.
                Arc::new(crate::llm::OpenAiClient::new(&llm_config)?)
            }
            crate::config::Protocol::Custom(_) => {
                // Not implemented yet.
                return Err(AgentError::NotInitialized.into());
            }
        };

        let tool_executor = tool_registry.create_executor(&agent_config.tools);

        // Default token budget when the model config does not specify one.
        let max_tokens = llm_config.params.max_tokens.unwrap_or(8192);
        let conversation_manager = ConversationManager::new(max_tokens, llm_client.clone());

        // Reuse the caller's abort controller when provided; otherwise
        // create a private one.
        let (abort_controller, abort_registration) = if let Some(controller) = abort_controller {
            let registration = controller.subscribe();
            (controller, registration)
        } else {
            crate::agent::AbortController::new()
        };

        Ok(Self {
            config: agent_config,
            llm_client,
            tool_executor,
            trajectory_recorder: None,
            conversation_history: Vec::new(),
            output,
            current_task_displayed: false,
            execution_context: None,
            conversation_manager,
            abort_controller,
            abort_registration,
        })
    }
230
231 pub async fn new(
233 agent_config: AgentConfig,
234 llm_config: crate::config::ResolvedLlmConfig,
235 ) -> Result<Self> {
236 use crate::output::events::NullOutput;
237 Self::new_with_llm_config(agent_config, llm_config, Box::new(NullOutput), None).await
238 }
239
    /// Override (or clear, with `None`) the custom system prompt. Takes
    /// effect the next time a system message is built.
    pub fn set_system_prompt(&mut self, system_prompt: Option<String>) {
        self.config.system_prompt = system_prompt;
    }
245
    /// The configured custom system prompt, if any.
    // NOTE(review): `Option<&str>` (via `as_deref`) would be more idiomatic,
    // but changing the return type would break existing callers.
    pub fn get_configured_system_prompt(&self) -> Option<&String> {
        self.config.system_prompt.as_ref()
    }
250
251 fn get_system_prompt(&self, project_path: &Path) -> String {
253 let base_prompt = if let Some(custom_prompt) = &self.config.system_prompt {
255 let system_context = crate::agent::prompt::build_system_context();
257
258 format!(
259 "{}\n\n\
260 [System Context]:\n{}",
261 custom_prompt, system_context
262 )
263 } else {
264 build_system_prompt_with_context(project_path)
266 };
267
268 format!(
269 "{}\n\nAvailable tools: {}",
270 base_prompt,
271 self.tool_executor.list_tools().join(", ")
272 )
273 }
274
    /// Run a single agent step, racing it against user cancellation.
    ///
    /// Returns `Ok(true)` when the task completed, `Ok(false)` to continue,
    /// or an error when the step failed or was interrupted.
    async fn execute_step(&mut self, step: usize, project_path: &Path) -> Result<bool> {
        // Clone the registration so it can be polled while `self` is
        // mutably borrowed by the inner future.
        let mut cancel_reg = self.abort_registration.clone();

        tokio::select! {
            _ = cancel_reg.cancelled() => {
                let _ = self.output.normal("⏹ Task interrupted by user").await;
                return Err("Task interrupted by user".into());
            }
            result = self.execute_step_inner(step, project_path) => {
                result
            }
        }
    }
292
    /// One LLM round-trip plus execution of any tool calls it requests.
    ///
    /// Returns `Ok(true)` only when the model signals completion via a
    /// successful `task_done` tool call; otherwise `Ok(false)`.
    async fn execute_step_inner(&mut self, step: usize, project_path: &Path) -> Result<bool> {
        // Prepend a system prompt if history does not already start with one.
        let mut messages = Vec::new();
        let needs_system_prompt = self.conversation_history.is_empty()
            || !matches!(
                self.conversation_history[0].role,
                crate::llm::MessageRole::System
            );

        if needs_system_prompt {
            messages.push(LlmMessage::system(self.get_system_prompt(project_path)));
        }
        messages.extend(self.conversation_history.clone());

        if let Some(recorder) = &self.trajectory_recorder {
            recorder
                .record(TrajectoryEntry::llm_request(
                    messages.clone(),
                    self.llm_client.model_name().to_string(),
                    self.llm_client.provider_name().to_string(),
                    step,
                ))
                .await?;
        }

        let tool_definitions = self.tool_executor.get_tool_definitions();

        let options = Some(ChatOptions {
            ..Default::default()
        });

        let response = match self
            .llm_client
            .chat_completion(messages, Some(tool_definitions), options)
            .await
        {
            Ok(response) => response,
            Err(e) => {
                // An LLM failure aborts the whole step.
                tracing::error!("❌ LLM request failed for step {}: {}", step, e);
                let _ = self
                    .output
                    .error(&format!("LLM request failed: {}", e))
                    .await;
                return Err(e);
            }
        };

        // Accumulate token usage into the execution context and notify the UI.
        if let Some(usage) = &response.usage {
            if let Some(context) = &mut self.execution_context {
                context.token_usage.input_tokens += usage.prompt_tokens;
                context.token_usage.output_tokens += usage.completion_tokens;
                context.token_usage.total_tokens += usage.total_tokens;

                // Event emission failures are non-fatal; log best-effort.
                self.output
                    .emit_token_update(context.token_usage.clone())
                    .await
                    .unwrap_or_else(|e| {
                        let _ = futures::executor::block_on(
                            self.output
                                .debug(&format!("Failed to emit token update event: {}", e)),
                        );
                    });
            }
        }

        if let Some(recorder) = &self.trajectory_recorder {
            recorder
                .record(TrajectoryEntry::llm_response(
                    response.message.clone(),
                    response.usage.clone(),
                    response.finish_reason.as_ref().map(|r| format!("{:?}", r)),
                    step,
                ))
                .await?;
        }

        self.conversation_history.push(response.message.clone());

        if response.message.has_tool_use() {
            let tool_uses = response.message.get_tool_uses();

            for tool_use in &tool_uses {
                if let crate::llm::ContentBlock::ToolUse { id, name, input } = tool_use {
                    let tool_call = crate::tools::ToolCall {
                        id: id.clone(),
                        name: name.clone(),
                        parameters: input.clone(),
                        metadata: None,
                    };

                    let tool_info = ToolExecutionInfo::create_tool_execution_info(
                        &tool_call,
                        ToolExecutionStatus::Executing,
                        None,
                    );

                    self.output
                        .emit_event(AgentEvent::ToolExecutionStarted {
                            tool_info: tool_info.clone(),
                        })
                        .await
                        .unwrap_or_else(|e| {
                            let _ = futures::executor::block_on(self.output.debug(&format!(
                                "Failed to emit tool execution started event: {}",
                                e
                            )));
                        });

                    if let Some(recorder) = &self.trajectory_recorder {
                        recorder
                            .record(TrajectoryEntry::tool_call(tool_call.clone(), step))
                            .await?;
                    }

                    // Some tools require explicit user approval before running.
                    let needs_confirm = self
                        .tool_executor
                        .get_tool(name)
                        .map(|t| t.requires_confirmation())
                        .unwrap_or(false);

                    let tool_result = if needs_confirm {
                        let mut meta = std::collections::HashMap::new();
                        meta.insert(
                            "tool_name".to_string(),
                            serde_json::Value::String(name.clone()),
                        );
                        meta.insert("parameters".to_string(), input.clone());
                        meta.insert(
                            "tool_call_id".to_string(),
                            serde_json::Value::String(id.clone()),
                        );

                        let request = crate::output::ConfirmationRequest {
                            id: id.clone(),
                            kind: crate::output::ConfirmationKind::ToolExecution,
                            title: format!("Execute tool: {}", name),
                            message: "This tool requires confirmation before execution."
                                .to_string(),
                            metadata: meta,
                        };

                        // A failed confirmation round-trip is treated as denial.
                        let decision = self.output.request_confirmation(&request).await.unwrap_or(
                            crate::output::ConfirmationDecision {
                                approved: false,
                                note: Some("Failed to obtain confirmation".to_string()),
                            },
                        );

                        if !decision.approved {
                            crate::tools::ToolResult::error(
                                id.clone(),
                                "Execution cancelled by user".to_string(),
                            )
                        } else {
                            match self.tool_executor.execute(tool_call.clone()).await {
                                Ok(result) => result,
                                Err(e) => {
                                    tracing::error!("Tool execution failed for {}: {}", name, e);
                                    crate::tools::ToolResult::error(
                                        id.clone(),
                                        format!("Tool execution failed: {}", e),
                                    )
                                }
                            }
                        }
                    } else {
                        // Execution errors become error tool-results instead of
                        // aborting the step, so the model can react to them.
                        match self.tool_executor.execute(tool_call.clone()).await {
                            Ok(result) => result,
                            Err(e) => {
                                tracing::error!("Tool execution failed for {}: {}", name, e);
                                crate::tools::ToolResult::error(
                                    id.clone(),
                                    format!("Tool execution failed: {}", e),
                                )
                            }
                        }
                    };

                    let completed_tool_info = ToolExecutionInfo::create_tool_execution_info(
                        &tool_call,
                        if tool_result.success {
                            ToolExecutionStatus::Success
                        } else {
                            ToolExecutionStatus::Error
                        },
                        Some(&tool_result),
                    );

                    self.output
                        .emit_event(AgentEvent::ToolExecutionCompleted {
                            tool_info: completed_tool_info,
                        })
                        .await
                        .unwrap_or_else(|e| {
                            let _ = futures::executor::block_on(self.output.debug(&format!(
                                "Failed to emit tool execution completed event: {}",
                                e
                            )));
                        });

                    // Surface "thinking" text from the sequentialthinking tool:
                    // prefer the structured `thought` field, fall back to
                    // parsing "Thought: ..." from the plain content.
                    if name == "sequentialthinking" {
                        if let Some(data) = &tool_result.data {
                            if let Some(thought) = data.get("thought") {
                                if let Some(thought_str) = thought.as_str() {
                                    self.output
                                        .emit_event(AgentEvent::AgentThinking {
                                            step_number: step,
                                            thinking: thought_str.to_string(),
                                        })
                                        .await
                                        .unwrap_or_else(|e| {
                                            let _ = futures::executor::block_on(self.output.debug(
                                                &format!("Failed to emit thinking event: {}", e),
                                            ));
                                        });
                                }
                            }
                        } else {
                            if let Some(start) = tool_result.content.find("Thought: ") {
                                let thought_start = start + "Thought: ".len();
                                if let Some(end) = tool_result.content[thought_start..].find("\n\n")
                                {
                                    let thought =
                                        &tool_result.content[thought_start..thought_start + end];
                                    self.output
                                        .emit_event(AgentEvent::AgentThinking {
                                            step_number: step,
                                            thinking: thought.to_string(),
                                        })
                                        .await
                                        .unwrap_or_else(|e| {
                                            let _ = futures::executor::block_on(self.output.debug(
                                                &format!("Failed to emit thinking event: {}", e),
                                            ));
                                        });
                                }
                            }
                        }
                    }

                    if let Some(recorder) = &self.trajectory_recorder {
                        recorder
                            .record(TrajectoryEntry::tool_result(tool_result.clone(), step))
                            .await?;
                    }

                    // A successful task_done ends the loop immediately.
                    // NOTE(review): this returns before pushing a tool-result
                    // message, leaving a dangling tool_use in history; the
                    // synthetic-result patch-up in execute_task_with_context
                    // appears to compensate on the next task — confirm intended.
                    if name == "task_done" && tool_result.success {
                        return Ok(true);
                    }

                    // Feed the tool result back to the model for the next step.
                    let result_message = LlmMessage {
                        role: crate::llm::MessageRole::Tool,
                        content: crate::llm::MessageContent::MultiModal(vec![
                            crate::llm::ContentBlock::ToolResult {
                                tool_use_id: id.clone(),
                                is_error: Some(!tool_result.success),
                                content: tool_result.content,
                            },
                        ]),
                        metadata: None,
                    };

                    self.conversation_history.push(result_message);
                }
            }

            return Ok(false);
        }

        // No tool calls: forward any plain-text response to the output sink.
        if let Some(text_content) = response.message.get_text() {
            if !text_content.trim().is_empty() {
                self.output.normal(&text_content).await.unwrap_or_else(|e| {
                    let _ = futures::executor::block_on(
                        self.output
                            .debug(&format!("Failed to emit agent response message: {}", e)),
                    );
                });
            }
        }

        Ok(false)
    }
605}
606
#[async_trait]
impl Agent for AgentCore {
    /// Execute a task rooted at the current working directory
    /// (falling back to "." when the cwd cannot be resolved).
    async fn execute_task(&mut self, task: &str) -> AgentResult<AgentExecution> {
        let current_dir = std::env::current_dir().unwrap_or_else(|_| std::path::PathBuf::from("."));
        self.execute_task_with_context(task, &current_dir).await
    }

    fn config(&self) -> &AgentConfig {
        &self.config
    }

    fn agent_type(&self) -> &str {
        "coro_agent"
    }

    fn set_trajectory_recorder(&mut self, recorder: TrajectoryRecorder) {
        self.trajectory_recorder = Some(recorder);
    }

    fn trajectory_recorder(&self) -> Option<&TrajectoryRecorder> {
        self.trajectory_recorder.as_ref()
    }
}
631
632impl AgentCore {
    /// Ask the conversation manager to compress history when the token
    /// budget requires it, falling back to simple trimming on failure.
    ///
    /// Always returns `Ok(())`: compression is best-effort and never fails
    /// the surrounding task.
    async fn apply_intelligent_compression(&mut self) -> Result<()> {
        match self
            .conversation_manager
            .maybe_compress(
                self.conversation_history.clone(),
                self.execution_context.as_ref(),
            )
            .await
        {
            Ok(result) => {
                self.conversation_history = result.messages;

                // Emit started/completed events only when compression ran.
                if let Some(summary) = result.compression_applied {
                    let _ = self
                        .output
                        .emit_event(AgentEvent::CompressionStarted {
                            level: summary.level.as_str().to_string(),
                            current_tokens: summary.tokens_before,
                            target_tokens: summary.tokens_after,
                            reason: format!(
                                "Token usage requires {} compression",
                                summary.level.as_str()
                            ),
                        })
                        .await;

                    let _ = self
                        .output
                        .emit_event(AgentEvent::CompressionCompleted {
                            summary: summary.summary.clone(),
                            tokens_saved: summary.tokens_saved,
                            messages_before: summary.messages_before,
                            messages_after: summary.messages_after,
                        })
                        .await;

                    tracing::info!("Compression completed: {}", summary.summary);
                }
            }
            Err(e) => {
                // Degrade gracefully: trim to a fixed message budget instead
                // of failing the task.
                tracing::warn!(
                    "Compression failed: {}. Falling back to simple trimming.",
                    e
                );
                self.fallback_trim_conversation_history(50);

                let _ = self
                    .output
                    .emit_event(AgentEvent::CompressionFailed {
                        error: e.to_string(),
                        fallback_action: "Simple message trimming applied".to_string(),
                    })
                    .await;
            }
        }

        Ok(())
    }
700
701 fn fallback_trim_conversation_history(&mut self, max_messages: usize) {
703 if self.conversation_history.len() <= max_messages {
704 return;
705 }
706
707 let mut new_history = Vec::new();
709 if let Some(first_msg) = self.conversation_history.first() {
710 if matches!(first_msg.role, crate::llm::MessageRole::System) {
711 new_history.push(first_msg.clone());
712 }
713 }
714
715 let keep_count = max_messages.saturating_sub(1); let start_index = self.conversation_history.len().saturating_sub(keep_count);
718
719 let skip_first = !new_history.is_empty();
721 let iter_start = if skip_first {
722 std::cmp::max(1, start_index)
723 } else {
724 start_index
725 };
726
727 new_history.extend(self.conversation_history[iter_start..].iter().cloned());
728
729 self.conversation_history = new_history;
730 }
731
    /// Run a task to completion (or interruption / step limit) against the
    /// given project path.
    ///
    /// Drives the step loop: compression → cancellation check → one LLM
    /// step, emitting lifecycle events and recording the trajectory along
    /// the way. Errors inside a step produce a failure result rather than
    /// propagating, except trajectory-recording errors which bubble up.
    pub async fn execute_task_with_context(
        &mut self,
        task: &str,
        project_path: &Path,
    ) -> AgentResult<AgentExecution> {
        let start_time = Instant::now();

        // Create the execution context on first use; on subsequent tasks
        // keep cumulative state (e.g. token usage) but reset the step count.
        if self.execution_context.is_none() {
            self.execution_context = Some(AgentExecutionContext {
                agent_id: "coro_agent".to_string(),
                original_goal: task.to_string(),
                current_task: task.to_string(),
                project_path: project_path.to_string_lossy().to_string(),
                max_steps: self.config.max_steps,
                current_step: 0,
                execution_time: std::time::Duration::from_secs(0),
                token_usage: TokenUsage::default(),
            });
        } else {
            if let Some(context) = &mut self.execution_context {
                context.current_task = task.to_string();
                context.current_step = 0;
            }
        }

        if let Some(context) = &self.execution_context {
            self.output
                .emit_event(AgentEvent::ExecutionStarted {
                    context: context.clone(),
                })
                .await
                .unwrap_or_else(|e| {
                    let _ = futures::executor::block_on(
                        self.output
                            .debug(&format!("Failed to emit execution started event: {}", e)),
                    );
                });
        }

        if let Some(recorder) = &self.trajectory_recorder {
            recorder
                .record(TrajectoryEntry::task_start(
                    task.to_string(),
                    serde_json::to_value(&self.config).unwrap_or_default(),
                ))
                .await?;
        }

        if self.conversation_history.is_empty() {
            self.conversation_history
                .push(LlmMessage::system(self.get_system_prompt(project_path)));
        }

        // If a previous task left an assistant tool_use without matching
        // tool results, synthesize error results so the provider does not
        // reject the history on the next request.
        let needs_synthetic_results = if let Some(last_msg) = self.conversation_history.last() {
            matches!(last_msg.role, crate::llm::MessageRole::Assistant) && last_msg.has_tool_use()
        } else {
            false
        };

        if needs_synthetic_results {
            let last_msg = self.conversation_history.last().unwrap().clone();

            let tool_uses = last_msg.get_tool_uses();
            let mut error_results = Vec::new();

            for tool_use in &tool_uses {
                if let crate::llm::ContentBlock::ToolUse { id, .. } = tool_use {
                    let error_result = LlmMessage {
                        role: crate::llm::MessageRole::Tool,
                        content: crate::llm::MessageContent::MultiModal(vec![
                            crate::llm::ContentBlock::ToolResult {
                                tool_use_id: id.clone(),
                                is_error: Some(true),
                                content: "Previous task interrupted or incomplete".to_string(),
                            },
                        ]),
                        metadata: None,
                    };
                    error_results.push(error_result);
                }
            }

            for error_result in error_results {
                self.conversation_history.push(error_result);
            }

            if !tool_uses.is_empty() {
                tracing::warn!(
                    "Added synthetic tool results for incomplete tool calls from previous task"
                );
            }
        }

        let user_message = build_user_message(task);
        self.conversation_history
            .push(LlmMessage::user(&user_message));

        let mut step = 0;
        let mut task_completed = false;

        let mut interrupted = false;
        let mut cancel_reg = self.abort_registration.clone();

        while step < self.config.max_steps && !task_completed {
            step += 1;

            // Cheap cancellation check before doing any work this round.
            if cancel_reg.is_cancelled() {
                interrupted = true;
                break;
            }

            self.apply_intelligent_compression().await?;

            // Re-check: compression may have taken a while.
            if cancel_reg.is_cancelled() {
                interrupted = true;
                break;
            }

            // Race the step itself against cancellation.
            tokio::select! {
                _ = cancel_reg.cancelled() => {
                    interrupted = true;
                    break;
                }
                result = self.execute_step(step, project_path) => {
                    match result {
                        Ok(completed) => {
                            task_completed = completed;

                            if let Some(recorder) = &self.trajectory_recorder {
                                recorder
                                    .record(TrajectoryEntry::step_complete(
                                        format!("Step {} completed", step),
                                        true,
                                        step,
                                    ))
                                    .await?;
                            }
                        }
                        Err(e) => {
                            if let Some(recorder) = &self.trajectory_recorder {
                                recorder
                                    .record(TrajectoryEntry::error(
                                        e.to_string(),
                                        Some(format!("Step {}", step)),
                                        step,
                                    ))
                                    .await?;
                            }

                            // A step error fails the task as a whole.
                            let duration = start_time.elapsed().as_millis() as u64;
                            return Ok(AgentExecution::failure(
                                format!("Error in step {}: {}", step, e),
                                step,
                                duration,
                            ));

                        }
                    }
                }
            }
        }

        let duration = start_time.elapsed();

        if let Some(context) = &mut self.execution_context {
            context.current_step = step;
            context.execution_time = duration;
        }

        if let Some(recorder) = &self.trajectory_recorder {
            recorder
                .record(TrajectoryEntry::task_complete(
                    task_completed,
                    if task_completed {
                        "Task completed successfully".to_string()
                    } else {
                        format!("Task incomplete after {} steps", step)
                    },
                    step,
                    duration.as_millis() as u64,
                ))
                .await?;
        }

        if let Some(context) = &self.execution_context {
            let summary = if task_completed {
                "Task completed successfully".to_string()
            } else {
                format!("Task incomplete after {} steps", step)
            };

            // Interruption short-circuits with a failure result.
            if interrupted {
                if let Some(context) = &self.execution_context {
                    self.output
                        .emit_event(AgentEvent::ExecutionInterrupted {
                            context: context.clone(),
                            reason: "Execution interrupted by user".to_string(),
                        })
                        .await
                        .unwrap_or_else(|e| {
                            let _ = futures::executor::block_on(self.output.debug(&format!(
                                "Failed to emit execution interrupted event: {}",
                                e
                            )));
                        });
                }
                let duration_ms = duration.as_millis() as u64;
                return Ok(AgentExecution::failure(
                    "Execution interrupted".to_string(),
                    step,
                    duration_ms,
                ));
            }

            self.output
                .emit_event(AgentEvent::ExecutionCompleted {
                    context: context.clone(),
                    success: task_completed,
                    summary: summary.clone(),
                })
                .await
                .unwrap_or_else(|e| {
                    let _ = futures::executor::block_on(
                        self.output
                            .debug(&format!("Failed to emit execution completed event: {}", e)),
                    );
                });
        }

        let duration_ms = duration.as_millis() as u64;

        if task_completed {
            Ok(AgentExecution::success(
                "Task completed successfully".to_string(),
                step,
                duration_ms,
            ))
        } else {
            Ok(AgentExecution::failure(
                format!("Task incomplete after {} steps", step),
                step,
                duration_ms,
            ))
        }
    }
1003}
1004
1005#[cfg(test)]
1006mod tests {
1007 use super::*;
1008 use crate::error::Result;
1009 use crate::llm::{
1010 ChatOptions, LlmClient, LlmMessage, LlmResponse, MessageContent, MessageRole,
1011 ToolDefinition,
1012 };
1013 use crate::AgentConfig;
1014 use async_trait::async_trait;
1015
    /// Minimal LLM stub used where the response content does not matter.
    struct MockLlmClient;

    impl MockLlmClient {
        fn new() -> Self {
            Self
        }
    }
1024
    #[async_trait]
    impl LlmClient for MockLlmClient {
        /// Always returns a fixed assistant text message with no usage data.
        async fn chat_completion(
            &self,
            _messages: Vec<LlmMessage>,
            _tools: Option<Vec<ToolDefinition>>,
            _options: Option<ChatOptions>,
        ) -> Result<LlmResponse> {
            Ok(LlmResponse {
                message: LlmMessage {
                    role: MessageRole::Assistant,
                    content: MessageContent::Text("Mock response".to_string()),
                    metadata: None,
                },
                usage: None,
                model: "mock-model".to_string(),
                finish_reason: None,
                metadata: None,
            })
        }

        fn model_name(&self) -> &str {
            "mock-model"
        }

        fn provider_name(&self) -> &str {
            "mock"
        }
    }
1054
1055 #[test]
1056 fn test_system_prompt_configuration() {
1057 let agent_config = AgentConfig {
1059 system_prompt: Some("Custom system prompt for testing".to_string()),
1060 ..Default::default()
1061 };
1062
1063 assert_eq!(
1064 agent_config.system_prompt,
1065 Some("Custom system prompt for testing".to_string())
1066 );
1067
1068 let default_config = AgentConfig::default();
1070 assert_eq!(default_config.system_prompt, None);
1071 }
1072
1073 #[test]
1074 fn test_system_prompt_serialization() {
1075 let config = AgentConfig {
1077 system_prompt: Some("Custom prompt".to_string()),
1078 ..Default::default()
1079 };
1080
1081 let json = serde_json::to_string(&config).unwrap();
1082 let deserialized: AgentConfig = serde_json::from_str(&json).unwrap();
1083
1084 assert_eq!(
1085 deserialized.system_prompt,
1086 Some("Custom prompt".to_string())
1087 );
1088 }
1089
    /// The default config must not set any system prompt.
    #[test]
    fn test_system_prompt_default_none() {
        let config = AgentConfig::default();
        assert_eq!(config.system_prompt, None);
    }
1096
    /// With a custom system prompt, the assembled prompt must include the
    /// custom text, generic system context, and tool list — but none of the
    /// project-specific context from the built-in engineering prompt.
    #[test]
    fn test_custom_system_prompt_excludes_project_context() {
        use crate::output::events::NullOutput;
        use crate::tools::ToolRegistry;
        use std::path::PathBuf;

        let agent_config = AgentConfig {
            system_prompt: Some("You are a general purpose AI assistant.".to_string()),
            ..Default::default()
        };

        let tool_registry = ToolRegistry::default();
        let tool_executor = tool_registry.create_executor(&agent_config.tools);

        let conversation_manager = ConversationManager::new(
            8192,
            std::sync::Arc::new(MockLlmClient::new()),
        );

        let (ac, reg) = crate::agent::AbortController::new();

        // Build the agent directly (no async constructor) for a pure unit test.
        let agent = AgentCore {
            config: agent_config,
            llm_client: std::sync::Arc::new(MockLlmClient::new()),
            tool_executor,
            trajectory_recorder: None,
            conversation_history: Vec::new(),
            output: Box::new(NullOutput),
            current_task_displayed: false,
            execution_context: None,
            conversation_manager,
            abort_controller: ac,
            abort_registration: reg,
        };

        let project_path = PathBuf::from("/some/project/path");
        let system_prompt = agent.get_system_prompt(&project_path);

        // Custom prompt text is present...
        assert!(system_prompt.contains("You are a general purpose AI assistant."));

        // ...along with the generic system context...
        assert!(system_prompt.contains("System Information:"));
        assert!(system_prompt.contains("Operating System:"));

        // ...and the tool list.
        assert!(system_prompt.contains("Available tools:"));

        // Project-specific context must be absent.
        assert!(!system_prompt.contains("Project root path"));
        assert!(!system_prompt.contains("/some/project/path"));
        assert!(!system_prompt.contains("IMPORTANT: When using tools that require file paths"));
        assert!(!system_prompt.contains("You are an expert AI software engineering agent"));
    }
1155
    /// End-to-end check that a failing tool execution is converted into an
    /// error tool-result (instead of aborting the task) and that a second
    /// task can run against the same history without provider errors.
    #[tokio::test]
    async fn test_tool_execution_error_handling() {
        use crate::llm::{ContentBlock, ToolDefinition};
        use crate::output::events::NullOutput;
        use std::path::PathBuf;

        /// Stub client: first call requests a bash command that will fail,
        /// subsequent calls (which see a Tool message) return plain text.
        struct ToolCallLlmClient;

        #[async_trait]
        impl LlmClient for ToolCallLlmClient {
            async fn chat_completion(
                &self,
                messages: Vec<LlmMessage>,
                _tools: Option<Vec<ToolDefinition>>,
                _options: Option<ChatOptions>,
            ) -> Result<LlmResponse> {
                // Detect whether a tool result has already been fed back.
                let has_tool_result = messages
                    .iter()
                    .any(|msg| matches!(msg.role, crate::llm::MessageRole::Tool));

                if has_tool_result {
                    Ok(LlmResponse {
                        message: LlmMessage {
                            role: MessageRole::Assistant,
                            content: MessageContent::Text(
                                "Understood, the tool execution failed but I can continue."
                                    .to_string(),
                            ),
                            metadata: None,
                        },
                        usage: None,
                        model: "test-model".to_string(),
                        finish_reason: None,
                        metadata: None,
                    })
                } else {
                    // First round: request a command that cannot exist.
                    Ok(LlmResponse {
                        message: LlmMessage {
                            role: MessageRole::Assistant,
                            content: MessageContent::MultiModal(vec![ContentBlock::ToolUse {
                                id: "test_id".to_string(),
                                name: "bash".to_string(),
                                input: serde_json::json!({
                                    "command": "/nonexistent/command/that/will/fail"
                                }),
                            }]),
                            metadata: None,
                        },
                        usage: None,
                        model: "test-model".to_string(),
                        finish_reason: None,
                        metadata: None,
                    })
                }
            }

            fn model_name(&self) -> &str {
                "test-model"
            }

            fn provider_name(&self) -> &str {
                "test"
            }
        }

        let agent_config = AgentConfig {
            max_steps: 5,
            tools: vec!["bash".to_string()],
            ..Default::default()
        };

        let tool_registry = crate::tools::ToolRegistry::default();
        let tool_executor = tool_registry.create_executor(&agent_config.tools);

        let conversation_manager =
            ConversationManager::new(8192, std::sync::Arc::new(ToolCallLlmClient));

        let (ac, reg) = crate::agent::AbortController::new();

        let mut agent = AgentCore {
            config: agent_config,
            llm_client: std::sync::Arc::new(ToolCallLlmClient),
            tool_executor,
            trajectory_recorder: None,
            conversation_history: Vec::new(),
            output: Box::new(NullOutput),
            current_task_displayed: false,
            execution_context: None,
            conversation_manager,
            abort_controller: ac,
            abort_registration: reg,
        };

        let project_path = PathBuf::from(".");

        let result = agent
            .execute_task_with_context("Test task 1", &project_path)
            .await;

        assert!(result.is_ok());

        // The failed tool run must appear as an error tool-result in history.
        let has_error_tool_result = agent.conversation_history.iter().any(|msg| {
            if let MessageContent::MultiModal(blocks) = &msg.content {
                blocks.iter().any(|block| {
                    if let ContentBlock::ToolResult { content, .. } = block {
                        content.contains("Tool execution failed") || content.contains("error")
                    } else {
                        false
                    }
                })
            } else {
                false
            }
        });
        assert!(
            has_error_tool_result,
            "Should have error tool result in history"
        );

        // A follow-up task over the same history must still succeed.
        let result2 = agent
            .execute_task_with_context("Test task 2", &project_path)
            .await;

        assert!(
            result2.is_ok(),
            "Second task should execute without API errors"
        );
    }
1296}