// bamboo_compression/src/summarizer.rs

1//! Conversation summarization for rolling context management.
2//!
3//! When conversations are truncated due to token limits, a summary preserves
4//! key information from earlier context.
5
6use async_trait::async_trait;
7use bamboo_agent_core::{Message, Role};
8use bamboo_domain::ReasoningEffort;
9use bamboo_infrastructure::LLMChunk;
10use bamboo_infrastructure::{LLMProvider, LLMRequestOptions};
11use futures::StreamExt;
12use std::collections::HashSet;
13use std::sync::Arc;
14
15/// Trait for summarization implementations.
16#[async_trait]
17pub trait Summarizer: Send + Sync {
18    /// Generate a summary of the given messages.
19    ///
20    /// Returns a string containing the summary.
21    async fn summarize(&self, messages: &[Message]) -> Result<String, crate::types::BudgetError>;
22
23    /// Get the estimated token count of the summary.
24    ///
25    /// Used to ensure the summary fits within the budget.
26    fn estimate_summary_tokens(&self, message_count: usize) -> u32 {
27        // Rough estimate: each message contributes ~50 tokens to the summary
28        (message_count * 50).min(1000) as u32
29    }
30}
31
/// Heuristic summarizer that extracts key points without using an LLM.
///
/// This is a lightweight summarization approach that:
/// 1. Lists user questions/requests
/// 2. Lists tools that were used
/// 3. Captures final conclusions
///
/// This provides continuity without expensive LLM calls.
///
/// Zero-sized unit struct; all behavior lives in the impl blocks below.
#[derive(Debug, Default)]
pub struct HeuristicSummarizer;
42
43impl HeuristicSummarizer {
44    /// Create a new heuristic summarizer.
45    pub fn new() -> Self {
46        Self
47    }
48
49    /// Extract user questions from messages.
50    fn extract_user_questions<'a>(&self, messages: &'a [Message]) -> Vec<&'a str> {
51        messages
52            .iter()
53            .filter(|m| m.role == Role::User)
54            .filter(|m| !m.content.is_empty())
55            .take(10) // Limit to prevent huge summaries
56            .map(|m| m.content.as_str())
57            .collect()
58    }
59
60    /// Extract tool calls that were made.
61    fn extract_tools_used(&self, messages: &[Message]) -> Vec<String> {
62        let mut tools = HashSet::new();
63
64        for message in messages {
65            if let Some(ref tool_calls) = message.tool_calls {
66                for call in tool_calls {
67                    tools.insert(call.function.name.clone());
68                }
69            }
70        }
71
72        let mut result: Vec<String> = tools.into_iter().collect();
73        result.sort();
74        result
75    }
76
77    /// Extract key assistant responses.
78    fn extract_key_responses<'a>(&self, messages: &'a [Message]) -> Vec<&'a str> {
79        messages
80            .iter()
81            .filter(|m| m.role == Role::Assistant)
82            .filter(|m| !m.content.is_empty())
83            .rev() // Take most recent first
84            .take(3)
85            .map(|m| m.content.as_str())
86            .collect()
87    }
88
89    /// Safely truncate a string at a character boundary.
90    /// Uses char_indices() to ensure we don't split UTF-8 multi-byte characters.
91    fn safe_truncate(&self, s: &str, max_chars: usize) -> String {
92        if s.chars().count() <= max_chars {
93            return s.to_string();
94        }
95
96        // Take up to max_chars characters safely
97        let truncated: String = s.chars().take(max_chars).collect();
98        format!("{}...", truncated)
99    }
100}
101
102#[async_trait]
103impl Summarizer for HeuristicSummarizer {
104    async fn summarize(&self, messages: &[Message]) -> Result<String, crate::types::BudgetError> {
105        if messages.is_empty() {
106            return Ok("No conversation history.".to_string());
107        }
108
109        let questions = self.extract_user_questions(messages);
110        let tools = self.extract_tools_used(messages);
111        let responses = self.extract_key_responses(messages);
112
113        let mut summary_parts = Vec::new();
114
115        // User requests section
116        if !questions.is_empty() {
117            summary_parts.push("## User Requests".to_string());
118            for (i, q) in questions.iter().enumerate() {
119                // Truncate long questions for the summary (safe UTF-8)
120                let truncated = self.safe_truncate(q, 200);
121                summary_parts.push(format!("{}. {}", i + 1, truncated));
122            }
123        }
124
125        // Tools used section
126        if !tools.is_empty() {
127            summary_parts.push("\n## Tools Used".to_string());
128            for tool in tools {
129                summary_parts.push(format!("- {}", tool));
130            }
131        }
132
133        // Key responses section
134        if !responses.is_empty() {
135            summary_parts.push("\n## Key Outcomes".to_string());
136            for (i, r) in responses.iter().enumerate() {
137                // Truncate long responses (safe UTF-8)
138                let truncated = self.safe_truncate(r, 300);
139                summary_parts.push(format!("{}. {}", i + 1, truncated));
140            }
141        }
142
143        if summary_parts.is_empty() {
144            Ok("Previous conversation context available.".to_string())
145        } else {
146            Ok(summary_parts.join("\n"))
147        }
148    }
149}
150
/// Trigger conditions for when to create a summary.
#[derive(Debug, Clone)]
pub enum SummaryTrigger {
    /// Always summarize when truncation occurs
    OnTruncation,
    /// Summarize after N rounds of conversation
    /// (fires once the message count reaches `interval`).
    Periodic { interval: usize },
    /// Summarize when token count exceeds threshold
    /// (fires when the current count is >= `threshold`).
    TokenThreshold { threshold: u32 },
}
161
/// Manager for conversation summarization.
///
/// Pairs a boxed [`Summarizer`] implementation with a [`SummaryTrigger`]
/// that decides when summarization should run.
pub struct SummaryManager {
    // Boxed trait object so any Summarizer implementation can be stored.
    summarizer: Box<dyn Summarizer>,
    // Condition controlling when `should_summarize` returns true.
    trigger: SummaryTrigger,
}
167
168impl std::fmt::Debug for SummaryManager {
169    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
170        f.debug_struct("SummaryManager")
171            .field("trigger", &self.trigger)
172            .finish_non_exhaustive()
173    }
174}
175
176impl SummaryManager {
177    /// Create a new summary manager.
178    pub fn new(summarizer: impl Summarizer + 'static, trigger: SummaryTrigger) -> Self {
179        Self {
180            summarizer: Box::new(summarizer),
181            trigger,
182        }
183    }
184
185    /// Check if summarization should be triggered based on conversation state.
186    pub fn should_summarize(
187        &self,
188        messages: &[Message],
189        _truncation_occurred: bool,
190        current_token_count: u32,
191    ) -> bool {
192        match &self.trigger {
193            SummaryTrigger::OnTruncation => _truncation_occurred,
194            SummaryTrigger::Periodic { interval } => messages.len() >= *interval,
195            SummaryTrigger::TokenThreshold { threshold } => current_token_count >= *threshold,
196        }
197    }
198
199    /// Generate a summary of the messages.
200    pub async fn summarize(
201        &self,
202        messages: &[Message],
203    ) -> Result<String, crate::types::BudgetError> {
204        self.summarizer.summarize(messages).await
205    }
206
207    /// Estimate the token count of a summary for N messages.
208    pub fn estimate_summary_tokens(&self, message_count: usize) -> u32 {
209        self.summarizer.estimate_summary_tokens(message_count)
210    }
211}
212
/// Mode controlling how the LLM summarizer handles existing summaries.
///
/// Selects which system prompt [`LlmSummarizer`] uses when building the
/// summarization request.
#[derive(Debug, Clone, Default)]
pub enum SummaryMode {
    /// Generate a complete summary from scratch (default).
    #[default]
    FullRewrite,
    /// Update an existing summary by incorporating new information incrementally.
    IncrementalMerge,
}
222
/// LLM-based summarizer that calls the current session's model to generate
/// a rich summary of compressed/removed messages.
///
/// Falls back to [`HeuristicSummarizer`] if the LLM call fails.
pub struct LlmSummarizer {
    /// Provider used to issue the summarization request.
    llm: Arc<dyn LLMProvider>,
    /// Model name passed through to the provider.
    model: String,
    /// Optional existing summary to build upon (incremental summarization).
    existing_summary: Option<String>,
    /// Optional current task list prompt so summary generation can distinguish
    /// active vs completed/obsolete work using the session's source of truth.
    task_list_prompt: Option<String>,
    /// Optional user-provided instructions that override/extend the default summary focus.
    custom_instructions: Option<String>,
    /// Controls how the summarizer handles existing summaries.
    summary_mode: SummaryMode,
}
240
impl LlmSummarizer {
    /// Create a new LLM-backed summarizer.
    ///
    /// `existing_summary` and `task_list_prompt`, when present, are woven into
    /// the prompt built by `build_summarization_messages`.
    pub fn new(
        llm: Arc<dyn LLMProvider>,
        model: String,
        existing_summary: Option<String>,
        task_list_prompt: Option<String>,
    ) -> Self {
        Self {
            llm,
            model,
            existing_summary,
            task_list_prompt,
            custom_instructions: None,
            summary_mode: SummaryMode::default(),
        }
    }

    /// Builder-style setter for optional user-provided summary instructions.
    pub fn with_custom_instructions(mut self, instructions: Option<String>) -> Self {
        self.custom_instructions = instructions;
        self
    }

    /// Builder-style setter for the summary mode (full rewrite vs incremental merge).
    pub fn with_summary_mode(mut self, mode: SummaryMode) -> Self {
        self.summary_mode = mode;
        self
    }

    /// Build the summarization prompt for the LLM.
    ///
    /// Returns a two-message prompt: a system message chosen by
    /// `self.summary_mode`, and a user message containing (in order) the
    /// previous summary, the current task list, custom instructions, the
    /// required output sections, and the messages to summarize.
    fn build_summarization_messages(&self, messages: &[Message]) -> Vec<Message> {
        let mut prompt_messages = Vec::new();

        // Pick the system prompt that matches the configured summary mode.
        let system_prompt = match self.summary_mode {
            SummaryMode::FullRewrite => {
                r#"You are a conversation summarizer. Your task is to create a concise but reliable working-memory summary for a conversation that was removed due to context window limits.

Guidelines:
- First capture the in-flight work right before compression (what was being done, where, and with which tool/file)
- Distinguish clearly between CURRENT ACTIVE work, COMPLETED work, and OBSOLETE or superseded work
- Do not restate old tasks as active unless they are still unresolved
- The provided current task list is the source of truth for active work
- Preserve key decisions, constraints, file paths, code changes, tool findings, blockers, and important outcomes
- Preserve error messages, test results (pass/fail counts), and function/variable names that are relevant to active work
- If earlier plans conflict with newer messages or the current task list, mark them as obsolete or completed
- Explicitly evaluate each clear user requirement (e.g. requirement 1, requirement 2) with a status and evidence
- Keep the next step specific and aligned with the active work only
- Use structured sections
- Write in the same language as the original conversation"#
            }
            SummaryMode::IncrementalMerge => {
                r#"You are updating an existing conversation summary with new information from recent messages.

Guidelines:
- Incorporate new information into the existing summary structure
- Mark previously active work as completed if the new messages confirm completion
- Remove or condense information that is no longer relevant
- Preserve all key decisions, file paths, and constraints that remain active
- If new messages conflict with the existing summary, the new messages take precedence
- Keep the summary focused on what is currently active and relevant
- The provided current task list is the source of truth for active work
- Maintain the same structured sections as the existing summary
- Write in the same language as the original conversation
- Be concise: avoid repeating information already well-captured in the existing summary"#
            }
        };

        prompt_messages.push(Message::system(system_prompt));

        let mut user_content = String::new();

        // Include the prior summary (if any) so the model can build on it.
        if let Some(ref existing) = self.existing_summary {
            user_content.push_str("## Previous Summary\n\n");
            user_content.push_str(existing);
            user_content.push_str("\n\n---\n\n");
        }

        // Include the task list only when it has non-whitespace content.
        if let Some(task_list_prompt) = self
            .task_list_prompt
            .as_deref()
            .map(str::trim)
            .filter(|value| !value.is_empty())
        {
            user_content.push_str("## Current Task List\n\n");
            user_content.push_str(task_list_prompt);
            user_content.push_str("\n\n---\n\n");
        }

        // Include user-supplied focus instructions when non-empty.
        if let Some(ref instructions) = self.custom_instructions {
            if !instructions.trim().is_empty() {
                user_content.push_str("## Custom Compression Instructions\n\n");
                user_content.push_str(instructions.trim());
                user_content.push_str("\n\n---\n\n");
            }
        }

        user_content.push_str(
            "## Required Output Sections\n1. Pre-compression in-flight work (what was being done immediately before compression)\n2. Current active objective\n3. Requirement checklist (Requirement | Status: completed/in_progress/pending/blocked/obsolete | Evidence)\n4. Active tasks\n5. Completed tasks\n6. Obsolete or superseded tasks\n7. Important context and constraints\n8. Files, code, and tool findings\n9. Open issues and next step\n\n",
        );

        user_content.push_str("## Messages to Summarize\n\n");

        // Render each message as a labeled transcript entry.
        for message in messages {
            let role_label = match message.role {
                Role::User => "User",
                Role::Assistant => "Assistant",
                Role::Tool => "Tool Result",
                // System messages are not part of the summarized history.
                Role::System => continue,
            };

            // Annotate assistant turns with the tool names they invoked.
            if let Some(ref tool_calls) = message.tool_calls {
                if !tool_calls.is_empty() {
                    let tool_names: Vec<&str> = tool_calls
                        .iter()
                        .map(|tc| tc.function.name.as_str())
                        .collect();
                    user_content.push_str(&format!(
                        "**{}** [called tools: {}]:\n",
                        role_label,
                        tool_names.join(", ")
                    ));
                } else {
                    user_content.push_str(&format!("**{}**:\n", role_label));
                }
            } else {
                user_content.push_str(&format!("**{}**:\n", role_label));
            }

            // Tool-result messages carry the id of the call they answer.
            if let Some(ref tool_call_id) = message.tool_call_id {
                user_content.push_str(&format!("(tool_call_id: {})\n", tool_call_id));
            }

            // Cap per-message content so one huge message cannot blow up the prompt.
            let content = &message.content;
            const MAX_CONTENT_CHARS: usize = 2000;
            if content.chars().count() > MAX_CONTENT_CHARS {
                let truncated: String = content.chars().take(MAX_CONTENT_CHARS).collect();
                user_content.push_str(&truncated);
                user_content.push_str("... [truncated]\n\n");
            } else {
                user_content.push_str(content);
                user_content.push_str("\n\n");
            }
        }

        user_content.push_str(
            "\n---\n\nReturn only the summary text. Be explicit about what is active now versus what is already completed or no longer relevant.",
        );

        prompt_messages.push(Message::user(user_content));

        prompt_messages
    }

    /// Consume an LLM stream and collect the full text response.
    ///
    /// On a mid-stream error, returns whatever partial content was already
    /// received (best effort); errors before any content arrives are
    /// propagated as `BudgetError::TokenCountError`.
    async fn collect_stream_response(
        &self,
        messages: &[Message],
    ) -> Result<String, crate::types::BudgetError> {
        // Summarization is a lightweight auxiliary request; cap reasoning effort at `high`
        // to stay compatible with fast models (e.g. gpt-5-mini).
        let options = LLMRequestOptions {
            session_id: None,
            reasoning_effort: Some(ReasoningEffort::High),
            parallel_tool_calls: None,
            responses: None,
        };
        let stream = self
            .llm
            .chat_stream_with_options(messages, &[], None, &self.model, Some(&options))
            .await
            .map_err(|e| {
                crate::types::BudgetError::TokenCountError(format!(
                    "LLM summarization call failed: {}",
                    e
                ))
            })?;

        let mut content = String::new();
        let mut stream = stream;

        while let Some(chunk_result) = stream.next().await {
            match chunk_result {
                Ok(LLMChunk::Token(text)) => content.push_str(&text),
                Ok(LLMChunk::Done) => break,
                Ok(_) => {} // Ignore reasoning tokens, tool calls, etc.
                Err(e) => {
                    tracing::warn!("LLM summarization stream error: {}", e);
                    // Keep partial output if we already have some text.
                    if !content.is_empty() {
                        break;
                    }
                    return Err(crate::types::BudgetError::TokenCountError(format!(
                        "LLM summarization stream failed: {}",
                        e
                    )));
                }
            }
        }

        Ok(content)
    }
}
440
441impl std::fmt::Debug for LlmSummarizer {
442    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
443        f.debug_struct("LlmSummarizer")
444            .field("model", &self.model)
445            .field("has_existing_summary", &self.existing_summary.is_some())
446            .finish()
447    }
448}
449
450#[async_trait]
451impl Summarizer for LlmSummarizer {
452    async fn summarize(&self, messages: &[Message]) -> Result<String, crate::types::BudgetError> {
453        if messages.is_empty() {
454            return Ok("No conversation history to summarize.".to_string());
455        }
456
457        let prompt_messages = self.build_summarization_messages(messages);
458
459        tracing::info!(
460            "LlmSummarizer: summarizing {} messages using model '{}' (existing_summary={})",
461            messages.len(),
462            self.model,
463            self.existing_summary.is_some()
464        );
465
466        match self.collect_stream_response(&prompt_messages).await {
467            Ok(summary) if !summary.trim().is_empty() => {
468                tracing::info!("LlmSummarizer: generated summary ({} chars)", summary.len());
469                Ok(summary)
470            }
471            Ok(_) => {
472                tracing::warn!(
473                    "LlmSummarizer: LLM returned empty summary, falling back to heuristic"
474                );
475                HeuristicSummarizer::new().summarize(messages).await
476            }
477            Err(e) => {
478                tracing::warn!(
479                    "LlmSummarizer: LLM call failed ({}), falling back to heuristic",
480                    e
481                );
482                HeuristicSummarizer::new().summarize(messages).await
483            }
484        }
485    }
486
487    fn estimate_summary_tokens(&self, message_count: usize) -> u32 {
488        // LLM summaries tend to be more detailed; estimate higher than heuristic
489        (message_count * 80).min(2000) as u32
490    }
491}
492
#[cfg(test)]
mod tests {
    use super::*;
    use async_trait::async_trait;
    use bamboo_domain::ReasoningEffort;
    use bamboo_infrastructure::{LLMChunk, LLMError, LLMRequestOptions, LLMStream};
    use futures::stream;
    use std::sync::Mutex;

    /// Minimal provider that streams a fixed one-token response.
    struct DummyProvider;

    #[async_trait]
    impl LLMProvider for DummyProvider {
        async fn chat_stream(
            &self,
            _messages: &[Message],
            _tools: &[bamboo_agent_core::ToolSchema],
            _max_output_tokens: Option<u32>,
            _model: &str,
        ) -> Result<LLMStream, LLMError> {
            Ok(Box::pin(stream::iter(vec![
                Ok::<LLMChunk, LLMError>(LLMChunk::Token("dummy summary".to_string())),
                Ok::<LLMChunk, LLMError>(LLMChunk::Done),
            ])))
        }
    }

    #[test]
    fn heuristic_summarizer_extracts_user_questions() {
        let summarizer = HeuristicSummarizer::new();
        let messages = vec![
            Message::user("What is the weather?"),
            Message::assistant("It's sunny.", None),
            Message::user("What about tomorrow?"),
        ];

        let questions = summarizer.extract_user_questions(&messages);
        assert_eq!(questions.len(), 2);
        assert!(questions[0].contains("weather"));
    }

    #[test]
    fn heuristic_summarizer_extracts_tools_used() {
        use bamboo_agent_core::{FunctionCall, ToolCall};

        let summarizer = HeuristicSummarizer::new();
        let tool_call = ToolCall {
            id: "call_1".to_string(),
            tool_type: "function".to_string(),
            function: FunctionCall {
                name: "search".to_string(),
                arguments: "{}".to_string(),
            },
        };

        let messages = vec![
            Message::user("Search for something"),
            Message::assistant("I'll search", Some(vec![tool_call])),
        ];

        let tools = summarizer.extract_tools_used(&messages);
        assert_eq!(tools, vec!["search"]);
    }

    #[test]
    fn heuristic_summarizer_extracts_key_responses() {
        let summarizer = HeuristicSummarizer::new();
        let messages = vec![
            Message::user("Hello"),
            Message::assistant("First response", None),
            Message::user("How are you?"),
            Message::assistant("Most recent response", None),
        ];

        let responses = summarizer.extract_key_responses(&messages);
        // Should return most recent first
        assert_eq!(responses[0], "Most recent response");
    }

    #[tokio::test]
    async fn heuristic_summarizer_generates_summary() {
        let summarizer = HeuristicSummarizer::new();
        let messages = vec![
            Message::user("What is Rust?"),
            Message::assistant("Rust is a systems programming language.", None),
        ];

        let summary = summarizer.summarize(&messages).await.unwrap();
        assert!(summary.contains("User Requests"));
        assert!(summary.contains("What is Rust?"));
    }

    #[test]
    fn summary_trigger_on_truncation() {
        // Exercise the actual decision logic instead of re-matching the
        // variant we just constructed.
        let manager = SummaryManager::new(HeuristicSummarizer::new(), SummaryTrigger::OnTruncation);

        // Fires exactly when truncation occurred, regardless of size/tokens.
        assert!(manager.should_summarize(&[], true, 0));
        assert!(!manager.should_summarize(&[], false, u32::MAX));
    }

    #[test]
    fn summary_trigger_periodic() {
        let manager = SummaryManager::new(
            HeuristicSummarizer::new(),
            SummaryTrigger::Periodic { interval: 5 },
        );
        let messages: Vec<Message> = (0..5).map(|_| Message::user("Test")).collect();

        // Fires once the message count reaches the interval, not before.
        assert!(manager.should_summarize(&messages, false, 0));
        assert!(!manager.should_summarize(&messages[..4], false, 0));
    }

    #[test]
    fn summary_trigger_token_threshold() {
        let manager = SummaryManager::new(
            HeuristicSummarizer::new(),
            SummaryTrigger::TokenThreshold { threshold: 1000 },
        );

        // Fires at the threshold (inclusive), not below it.
        assert!(manager.should_summarize(&[], false, 1000));
        assert!(!manager.should_summarize(&[], false, 999));
    }

    #[test]
    fn safe_truncate_handles_ascii() {
        let summarizer = HeuristicSummarizer::new();
        let text = "Hello world this is a test";
        let truncated = summarizer.safe_truncate(text, 10);

        assert!(truncated.ends_with("..."));
        // Should have at most 10 characters + "..."
        assert!(truncated.chars().count() <= 13);
    }

    #[test]
    fn safe_truncate_handles_unicode() {
        let summarizer = HeuristicSummarizer::new();

        // Test with emoji (multi-byte UTF-8)
        let text = "Hello 😀🎉🚀 World with emoji";
        let truncated = summarizer.safe_truncate(text, 10);

        // Should not panic and should end with "..."
        assert!(truncated.ends_with("..."));
        assert!(truncated.chars().count() <= 13);
    }

    #[test]
    fn safe_truncate_handles_cjk() {
        let summarizer = HeuristicSummarizer::new();

        // Test with Chinese/Japanese/Korean characters (3-byte UTF-8)
        let text = "这是一个中文测试消息用于验证截断";
        let truncated = summarizer.safe_truncate(text, 10);

        // Should not panic
        assert!(truncated.ends_with("..."));
        assert!(truncated.chars().count() <= 13);
    }

    #[test]
    fn safe_truncate_handles_mixed_unicode() {
        let summarizer = HeuristicSummarizer::new();

        // Mixed ASCII, CJK, and emoji
        let text = "Hello 世界 🌍 test message";
        let truncated = summarizer.safe_truncate(text, 8);

        // Should not panic
        assert!(truncated.ends_with("..."));
        assert!(truncated.chars().count() <= 11);
    }

    #[tokio::test]
    async fn summarizer_handles_unicode_messages() {
        let summarizer = HeuristicSummarizer::new();

        // Create messages with unicode that needs truncation
        let long_unicode =
            "这是一段很长的中文消息需要被截断以测试我们的安全截断功能 😀🎉🚀".repeat(10);
        let messages = vec![
            Message::user(&long_unicode),
            Message::assistant("Response", None),
        ];

        // Should not panic on unicode truncation
        let summary = summarizer.summarize(&messages).await.unwrap();
        assert!(summary.contains("User Requests"));
    }

    #[test]
    fn safe_truncate_returns_short_text_unchanged() {
        let summarizer = HeuristicSummarizer::new();
        let text = "Short";
        let truncated = summarizer.safe_truncate(text, 100);

        // Should return unchanged
        assert_eq!(truncated, text);
    }

    #[test]
    fn llm_summarizer_prompt_includes_task_list_and_state_sections() {
        let summarizer = LlmSummarizer::new(
            Arc::new(DummyProvider),
            "gpt-4o-mini".to_string(),
            Some("Earlier summary".to_string()),
            Some(
                "## Current Task List\n[/] task_1: Fix compression bounce\n[x] task_0: Analyze bug"
                    .to_string(),
            ),
        );
        let messages = vec![
            Message::user("继续做压缩修复"),
            Message::assistant("我先检查 trigger 与 target", None),
        ];

        let prompt_messages = summarizer.build_summarization_messages(&messages);
        assert_eq!(prompt_messages.len(), 2);
        assert_eq!(prompt_messages[0].role, Role::System);
        assert!(prompt_messages[1].content.contains("## Current Task List"));
        assert!(prompt_messages[1]
            .content
            .contains("Current active objective"));
        assert!(prompt_messages[1].content.contains("Requirement checklist"));
        assert!(prompt_messages[1].content.contains("Active tasks"));
        assert!(prompt_messages[1].content.contains("Completed tasks"));
        assert!(prompt_messages[1]
            .content
            .contains("Obsolete or superseded tasks"));
        assert!(prompt_messages[1].content.contains("Earlier summary"));
    }

    /// Provider that records the reasoning effort requested by each call.
    #[derive(Default)]
    struct ReasoningCaptureProvider {
        captured_reasoning: Mutex<Vec<Option<ReasoningEffort>>>,
    }

    #[async_trait]
    impl LLMProvider for ReasoningCaptureProvider {
        async fn chat_stream(
            &self,
            _messages: &[Message],
            _tools: &[bamboo_agent_core::ToolSchema],
            _max_output_tokens: Option<u32>,
            _model: &str,
        ) -> Result<LLMStream, LLMError> {
            Ok(Box::pin(stream::iter(vec![
                Ok::<LLMChunk, LLMError>(LLMChunk::Token("captured summary".to_string())),
                Ok::<LLMChunk, LLMError>(LLMChunk::Done),
            ])))
        }

        async fn chat_stream_with_options(
            &self,
            messages: &[Message],
            tools: &[bamboo_agent_core::ToolSchema],
            max_output_tokens: Option<u32>,
            model: &str,
            options: Option<&LLMRequestOptions>,
        ) -> Result<LLMStream, LLMError> {
            self.captured_reasoning
                .lock()
                .expect("captured reasoning lock should not be poisoned")
                .push(options.and_then(|o| o.reasoning_effort));
            self.chat_stream(messages, tools, max_output_tokens, model)
                .await
        }
    }

    #[tokio::test]
    async fn llm_summarizer_requests_high_reasoning_effort_for_summary_calls() {
        let provider = Arc::new(ReasoningCaptureProvider::default());
        let summarizer = LlmSummarizer::new(
            provider.clone(),
            "gpt-5-mini".to_string(),
            None,
            Some("task list".to_string()),
        );
        let messages = vec![
            Message::user("请总结最近三轮"),
            Message::assistant("已完成第一步并准备第二步", None),
        ];

        let summary = summarizer
            .summarize(&messages)
            .await
            .expect("summary generation should succeed");
        assert_eq!(summary, "captured summary");

        let captured = provider
            .captured_reasoning
            .lock()
            .expect("captured reasoning lock should not be poisoned");
        assert_eq!(captured.as_slice(), [Some(ReasoningEffort::High)]);
    }

    #[test]
    fn full_rewrite_mode_uses_default_system_prompt() {
        let summarizer =
            LlmSummarizer::new(Arc::new(DummyProvider), "model".to_string(), None, None)
                .with_summary_mode(SummaryMode::FullRewrite);
        let messages = vec![Message::user("hello"), Message::assistant("hi", None)];
        let prompts = summarizer.build_summarization_messages(&messages);
        let system = &prompts[0].content;
        assert!(
            system.contains("conversation summarizer"),
            "FullRewrite prompt should contain 'conversation summarizer'"
        );
        assert!(
            !system.contains("updating an existing"),
            "FullRewrite prompt should not contain incremental language"
        );
    }

    #[test]
    fn incremental_merge_mode_uses_update_system_prompt() {
        let summarizer = LlmSummarizer::new(
            Arc::new(DummyProvider),
            "model".to_string(),
            Some("Previous summary content".to_string()),
            None,
        )
        .with_summary_mode(SummaryMode::IncrementalMerge);
        let messages = vec![Message::user("hello"), Message::assistant("hi", None)];
        let prompts = summarizer.build_summarization_messages(&messages);
        let system = &prompts[0].content;
        assert!(
            system.contains("updating an existing conversation summary"),
            "IncrementalMerge prompt should contain 'updating an existing conversation summary'"
        );
        assert!(
            system.contains("Incorporate new information"),
            "IncrementalMerge prompt should mention incorporating new information"
        );
    }

    #[test]
    fn default_summary_mode_is_full_rewrite() {
        assert!(matches!(SummaryMode::default(), SummaryMode::FullRewrite));
    }

    #[test]
    fn incremental_merge_includes_existing_summary_in_user_content() {
        let summarizer = LlmSummarizer::new(
            Arc::new(DummyProvider),
            "model".to_string(),
            Some("Previous summary content".to_string()),
            None,
        )
        .with_summary_mode(SummaryMode::IncrementalMerge);
        let messages = vec![
            Message::user("new work"),
            Message::assistant("doing it", None),
        ];
        let prompts = summarizer.build_summarization_messages(&messages);
        let user_content = &prompts[1].content;
        assert!(
            user_content.contains("Previous Summary"),
            "IncrementalMerge user prompt should include the existing summary"
        );
        assert!(
            user_content.contains("Previous summary content"),
            "IncrementalMerge user prompt should include the actual summary text"
        );
    }
}
863}