bamboo_engine/runtime/managers/adapters/llm_mini_loop.rs

use async_trait::async_trait;
use bamboo_agent_core::tools::ToolCall;
use bamboo_agent_core::{AgentError, Session};
use bamboo_domain::{Message, Role};
use bamboo_infrastructure::LLMProvider;

use crate::runtime::managers::mini_loop::{MiniLoopDecision, MiniLoopExecutor};

/// Production `MiniLoopExecutor` backed by a real LLM provider.
///
/// Uses a fast/cheap model for lightweight decisions such as task complexity
/// classification, compression decisions, retry classification, and routing.
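///
/// # Example
///
/// A minimal usage sketch (not compiled as a doctest); `MyProvider`, the model
/// name, and `session` are illustrative placeholders, not APIs of this crate.
///
/// ```ignore
/// use std::sync::Arc;
///
/// let provider: Arc<dyn LLMProvider> = Arc::new(MyProvider::default());
/// let executor = LLMMiniLoopExecutor::new(provider, "fast-mini-model".to_string());
/// let decision = executor.decide(&session, "Classify this task.", "").await?;
/// // `decision.answer` holds the trimmed one-word reply, e.g. "simple".
/// ```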
pub struct LLMMiniLoopExecutor {
    provider: std::sync::Arc<dyn LLMProvider>,
    model: String,
}

impl LLMMiniLoopExecutor {
    pub fn new(provider: std::sync::Arc<dyn LLMProvider>, model: String) -> Self {
        Self { provider, model }
    }
}

#[async_trait]
impl MiniLoopExecutor for LLMMiniLoopExecutor {
    async fn decide(
        &self,
        _session: &Session,
        prompt: &str,
        context: &str,
    ) -> Result<MiniLoopDecision, AgentError> {
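        // Fold any supplied context into the user prompt before classification.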
        let user_content = if context.is_empty() {
            prompt.to_string()
        } else {
            format!("Context:\n{}\n\n{}", context, prompt)
        };

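        // Build a two-message prompt: a system instruction that constrains the
        // reply to a single word, followed by the (possibly context-prefixed) user content.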
        let now = chrono::Utc::now();
        let messages = vec![
            Message {
                id: String::new(),
                role: Role::System,
                content: "You are a task complexity classifier. Respond with exactly one word: simple, standard, or complex.".to_string(),
                reasoning: None,
                content_parts: None,
                image_ocr: None,
                phase: None,
                tool_calls: None,
                tool_call_id: None,
                tool_success: None,
                compressed: false,
                compressed_by_event_id: None,
                never_compress: false,
                compression_level: 0,
                created_at: now,
                metadata: None,
            },
            Message {
                id: String::new(),
                role: Role::User,
                content: user_content,
                reasoning: None,
                content_parts: None,
                image_ocr: None,
                phase: None,
                tool_calls: None,
                tool_call_id: None,
                tool_success: None,
                compressed: false,
                compressed_by_event_id: None,
                never_compress: false,
                compression_level: 0,
                created_at: now,
                metadata: None,
            },
        ];

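        // Stream the classification from the fast model configured for mini-loop decisions.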
        let stream = self
            .provider
            .chat_stream(&messages, &[], Some(50), &self.model)
            .await
            .map_err(|e| AgentError::LLM(e.to_string()))?;

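        // Collect the full streamed response without surfacing incremental output.
        // A fresh `CancellationToken` means this call is not cancellable from outside.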
        let output = crate::runtime::stream::handler::consume_llm_stream_silent(
            stream,
            &tokio_util::sync::CancellationToken::new(),
            "mini-loop",
        )
        .await
        .map_err(|e| AgentError::LLM(e.to_string()))?;

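        // Token usage is not tracked for mini-loop calls; counts are reported as zero.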
        Ok(MiniLoopDecision {
            answer: output.content.trim().to_string(),
            prompt_tokens: 0,
            completion_tokens: 0,
        })
    }

    async fn evaluate_task(
        &self,
        _session: &Session,
        _tool_calls: &[ToolCall],
        _round: usize,
    ) -> Result<MiniLoopDecision, AgentError> {
        // Task evaluation is handled by the dedicated task_evaluation module.
        // This implementation returns an empty decision as a fallback.
        Ok(MiniLoopDecision {
            answer: String::new(),
            prompt_tokens: 0,
            completion_tokens: 0,
        })
    }
}