// agent_sdk/context/compactor.rs
1//! Context compaction implementation.
2
3use crate::llm::{ChatOutcome, ChatRequest, Content, ContentBlock, LlmProvider, Message, Role};
4use anyhow::{Context, Result, bail};
5use async_trait::async_trait;
6use std::fmt::Write;
7use std::sync::Arc;
8
9use super::config::CompactionConfig;
10use super::estimator::TokenEstimator;
11
/// Marker prefix placed at the start of every compaction-summary message so
/// later passes can recognize prior summaries and avoid re-summarizing them.
const SUMMARY_PREFIX: &str = "[Previous conversation summary]\n\n";
/// System prompt sent with the summarization LLM request.
const COMPACTION_SYSTEM_PROMPT: &str = "You are a precise summarizer. Your task is to create concise but complete summaries of conversations, preserving all technical details needed to continue the work.";
/// Instructions prepended before the formatted conversation transcript.
const COMPACTION_SUMMARY_PROMPT_PREFIX: &str = "Summarize this conversation concisely, preserving:\n- Key decisions and conclusions reached\n- Important file paths, code changes, and technical details\n- Current task context and what has been accomplished\n- Any pending items, errors encountered, or next steps\n\nBe specific about technical details (file names, function names, error messages) as these\nare critical for continuing the work.\n\nConversation:\n";
/// Instructions appended after the transcript.
const COMPACTION_SUMMARY_PROMPT_SUFFIX: &str =
    "Provide a concise summary (aim for 500-1000 words):";
/// Fallback summary used when every candidate message was itself a prior
/// summary (nothing new to summarize).
const COMPACT_EMPTY_SUMMARY: &str = "No additional context was available to summarize; the previous messages were already compacted.";
/// Synthetic assistant reply inserted right after the summary message.
const SUMMARY_ACKNOWLEDGMENT: &str =
    "I understand the context from the summary. Let me continue from where we left off.";
/// Hard cap on estimated tokens kept in the retained (non-summarized) tail.
const MAX_RETAINED_TAIL_MESSAGE_TOKENS: usize = 20_000;
/// Tool-result text longer than this many characters is truncated when
/// rendering the transcript for summarization.
const MAX_TOOL_RESULT_CHARS: usize = 500;
22
/// Trait for context compaction strategies.
///
/// Implement this trait to provide custom compaction logic.
#[async_trait]
pub trait ContextCompactor: Send + Sync {
    /// Compact a list of messages into a summary.
    ///
    /// Returns the summary text only; the input history is not modified.
    ///
    /// # Errors
    /// Returns an error if summarization fails.
    async fn compact(&self, messages: &[Message]) -> Result<String>;

    /// Estimate tokens for a message list.
    fn estimate_tokens(&self, messages: &[Message]) -> usize;

    /// Check if compaction is needed.
    fn needs_compaction(&self, messages: &[Message]) -> bool;

    /// Perform full compaction, returning new message history.
    ///
    /// Consumes the history; implementations may return it unchanged when
    /// there is nothing to compact (see [`CompactionResult`]).
    ///
    /// # Errors
    /// Returns an error if compaction fails.
    async fn compact_history(&self, messages: Vec<Message>) -> Result<CompactionResult>;
}
46
/// Result of a compaction operation.
///
/// When no compaction occurs (history shorter than the retained tail), the
/// `new_*` fields equal the `original_*` fields and `messages` is unchanged.
#[derive(Debug, Clone)]
pub struct CompactionResult {
    /// The new compacted message history.
    pub messages: Vec<Message>,
    /// Number of messages before compaction.
    pub original_count: usize,
    /// Number of messages after compaction (includes any synthetic
    /// summary/acknowledgment messages).
    pub new_count: usize,
    /// Estimated tokens before compaction.
    pub original_tokens: usize,
    /// Estimated tokens after compaction.
    pub new_tokens: usize,
}
61
62/// LLM-based context compactor.
63///
64/// Uses the LLM itself to summarize older messages into a compact form.
65pub struct LlmContextCompactor<P: LlmProvider> {
66    provider: Arc<P>,
67    config: CompactionConfig,
68    system_prompt: String,
69    summary_prompt_prefix: String,
70    summary_prompt_suffix: String,
71}
72
impl<P: LlmProvider> LlmContextCompactor<P> {
    /// Create a new LLM context compactor using the default prompt text.
    #[must_use]
    pub fn new(provider: Arc<P>, config: CompactionConfig) -> Self {
        Self {
            provider,
            config,
            system_prompt: COMPACTION_SYSTEM_PROMPT.to_string(),
            summary_prompt_prefix: COMPACTION_SUMMARY_PROMPT_PREFIX.to_string(),
            summary_prompt_suffix: COMPACTION_SUMMARY_PROMPT_SUFFIX.to_string(),
        }
    }

    /// Create with default configuration.
    #[must_use]
    pub fn with_defaults(provider: Arc<P>) -> Self {
        Self::new(provider, CompactionConfig::default())
    }

    /// Get the configuration.
    #[must_use]
    pub const fn config(&self) -> &CompactionConfig {
        &self.config
    }

    /// Override the prompts used for LLM-based summarization.
    ///
    /// The final summarization prompt is `prefix + transcript + suffix`
    /// (see `build_summary_prompt`); `system_prompt` is sent as the
    /// request's system message.
    #[must_use]
    pub fn with_prompts(
        mut self,
        system_prompt: impl Into<String>,
        summary_prompt_prefix: impl Into<String>,
        summary_prompt_suffix: impl Into<String>,
    ) -> Self {
        self.system_prompt = system_prompt.into();
        self.summary_prompt_prefix = summary_prompt_prefix.into();
        self.summary_prompt_suffix = summary_prompt_suffix.into();
        self
    }

    /// Return true when a content object is a previously inserted compaction summary marker.
    ///
    /// Matches plain text — or any text block — beginning with
    /// `SUMMARY_PREFIX`; used by `compact` to avoid summarizing summaries.
    fn is_summary_message(content: &Content) -> bool {
        match content {
            Content::Text(text) => text.starts_with(SUMMARY_PREFIX),
            Content::Blocks(blocks) => blocks.iter().any(|block| match block {
                ContentBlock::Text { text } => text.starts_with(SUMMARY_PREFIX),
                _ => false,
            }),
        }
    }

    /// Return true when a message contains a tool-use block.
    fn has_tool_use(content: &Content) -> bool {
        matches!(
            content,
            Content::Blocks(blocks)
                if blocks
                    .iter()
                    .any(|block| matches!(block, ContentBlock::ToolUse { .. }))
        )
    }

    /// Return true when a message contains a tool-result block.
    fn has_tool_result(content: &Content) -> bool {
        matches!(
            content,
            Content::Blocks(blocks)
                if blocks
                    .iter()
                    .any(|block| matches!(block, ContentBlock::ToolResult { .. }))
        )
    }

    /// Shift split point backwards until tool-use/result pairs are not split.
    ///
    /// A boundary between an assistant `tool_use` and the adjacent user
    /// `tool_result` (in either order) sits "inside" a pair; the split is
    /// moved one message earlier until the boundary is pair-free or index 0
    /// is reached. The result is therefore always <= the input split point.
    fn split_point_preserves_tool_pairs(messages: &[Message], mut split_point: usize) -> usize {
        while split_point > 0 && split_point < messages.len() {
            let prev = &messages[split_point - 1];
            let next = &messages[split_point];

            let crosses_tool_pair = (prev.role == Role::Assistant
                && Self::has_tool_use(&prev.content)
                && next.role == Role::User
                && Self::has_tool_result(&next.content))
                || (prev.role == Role::User
                    && Self::has_tool_result(&prev.content)
                    && next.role == Role::Assistant
                    && Self::has_tool_use(&next.content));

            if crosses_tool_pair {
                split_point -= 1;
                continue;
            }

            break;
        }

        split_point
    }

    /// Shift split point to satisfy both pair safety and retained-tail token cap.
    ///
    /// Alternates two adjustments until they agree: the token cap can only
    /// move the split forward (shrinking the retained tail) while the
    /// pair-safety pass can only move it backward.
    ///
    /// NOTE(review): the loop returns `candidate` (the cap-limited index)
    /// once the pair-safety pass maps it back to the incoming `split_point`.
    /// When the cap and pair safety conflict, the cap therefore wins and the
    /// returned index can still fall between a tool_use and its tool_result
    /// — confirm this precedence is intended.
    fn split_point_preserves_tool_pairs_with_cap(
        messages: &[Message],
        mut split_point: usize,
        max_tokens: usize,
    ) -> usize {
        loop {
            let candidate = Self::retain_tail_with_token_cap(messages, split_point, max_tokens);
            let adjusted = Self::split_point_preserves_tool_pairs(messages, candidate);

            if adjusted == split_point {
                return candidate;
            }

            split_point = adjusted;
        }
    }

    /// Keep most recent messages that fit within the retained-message token budget.
    ///
    /// Walks backwards from the newest message accumulating estimated
    /// tokens, and returns the index of the oldest message that still fits.
    /// Returns `messages.len()` (retain nothing) when `start` is out of
    /// range or when the budget is zero.
    fn retain_tail_with_token_cap(messages: &[Message], start: usize, max_tokens: usize) -> usize {
        if start >= messages.len() {
            return messages.len();
        }

        if max_tokens == 0 {
            return messages.len();
        }

        let mut used = 0usize;
        let mut retained_start = messages.len();

        for idx in (start..messages.len()).rev() {
            let message_tokens = TokenEstimator::estimate_message(&messages[idx]);
            if used + message_tokens > max_tokens {
                break;
            }

            retained_start = idx;
            used += message_tokens;
        }

        retained_start
    }

    /// Format messages for summarization.
    ///
    /// Renders each message as `Role: text`, expanding content blocks into
    /// bracketed descriptions (thinking, tool calls/results, attachments).
    /// `write!` results are discarded — writing into a `String` cannot fail.
    fn format_messages_for_summary(messages: &[Message]) -> String {
        let mut output = String::new();

        for message in messages {
            let role = match message.role {
                Role::User => "User",
                Role::Assistant => "Assistant",
            };

            let _ = write!(output, "{role}: ");

            match &message.content {
                Content::Text(text) => {
                    let _ = writeln!(output, "{text}");
                }
                Content::Blocks(blocks) => {
                    for block in blocks {
                        match block {
                            ContentBlock::Text { text } => {
                                let _ = writeln!(output, "{text}");
                            }
                            ContentBlock::Thinking { thinking, .. } => {
                                // Include thinking in summaries for context
                                let _ = writeln!(output, "[Thinking: {thinking}]");
                            }
                            ContentBlock::RedactedThinking { .. } => {
                                let _ = writeln!(output, "[Redacted thinking]");
                            }
                            ContentBlock::ToolUse { name, input, .. } => {
                                // Unserializable inputs render as empty rather than failing.
                                let _ = writeln!(
                                    output,
                                    "[Called tool: {name} with input: {}]",
                                    serde_json::to_string(input).unwrap_or_default()
                                );
                            }
                            ContentBlock::ToolResult {
                                content, is_error, ..
                            } => {
                                // A missing error flag is treated as success.
                                let status = if is_error.unwrap_or(false) {
                                    "error"
                                } else {
                                    "success"
                                };
                                // Truncate long tool results (Unicode-safe; avoid slicing mid-codepoint)
                                let truncated = if content.chars().count() > MAX_TOOL_RESULT_CHARS {
                                    let prefix: String =
                                        content.chars().take(MAX_TOOL_RESULT_CHARS).collect();
                                    format!("{prefix}... (truncated)")
                                } else {
                                    content.clone()
                                };
                                let _ = writeln!(output, "[Tool result ({status}): {truncated}]");
                            }
                            ContentBlock::Image { source } => {
                                let _ = writeln!(output, "[Image: {}]", source.media_type);
                            }
                            ContentBlock::Document { source } => {
                                let _ = writeln!(output, "[Document: {}]", source.media_type);
                            }
                        }
                    }
                }
            }
            // Blank line between messages in the transcript.
            output.push('\n');
        }

        output
    }

    /// Build the summarization prompt.
    ///
    /// Concatenates `prefix + transcript + suffix` as configured.
    fn build_summary_prompt(&self, messages_text: &str) -> String {
        format!(
            "{}{}{}",
            self.summary_prompt_prefix, messages_text, self.summary_prompt_suffix
        )
    }
}
293
294#[async_trait]
295impl<P: LlmProvider> ContextCompactor for LlmContextCompactor<P> {
296    async fn compact(&self, messages: &[Message]) -> Result<String> {
297        let messages_to_summarize: Vec<_> = messages
298            .iter()
299            .filter(|message| !Self::is_summary_message(&message.content))
300            .cloned()
301            .collect();
302
303        if messages_to_summarize.is_empty() {
304            return Ok(COMPACT_EMPTY_SUMMARY.to_string());
305        }
306
307        let messages_text = Self::format_messages_for_summary(&messages_to_summarize);
308        let prompt = self.build_summary_prompt(&messages_text);
309
310        let request = ChatRequest {
311            system: self.system_prompt.clone(),
312            messages: vec![Message::user(prompt)],
313            tools: None,
314            max_tokens: 2000,
315            thinking: None,
316        };
317
318        let outcome = self
319            .provider
320            .chat(request)
321            .await
322            .context("Failed to call LLM for summarization")?;
323
324        match outcome {
325            ChatOutcome::Success(response) => response
326                .first_text()
327                .map(String::from)
328                .context("No text in summarization response"),
329            ChatOutcome::RateLimited => {
330                bail!("Rate limited during summarization")
331            }
332            ChatOutcome::InvalidRequest(msg) => {
333                bail!("Invalid request during summarization: {msg}")
334            }
335            ChatOutcome::ServerError(msg) => {
336                bail!("Server error during summarization: {msg}")
337            }
338        }
339    }
340
341    fn estimate_tokens(&self, messages: &[Message]) -> usize {
342        TokenEstimator::estimate_history(messages)
343    }
344
345    fn needs_compaction(&self, messages: &[Message]) -> bool {
346        if !self.config.auto_compact {
347            return false;
348        }
349
350        if messages.len() < self.config.min_messages_for_compaction {
351            return false;
352        }
353
354        let estimated_tokens = self.estimate_tokens(messages);
355        estimated_tokens > self.config.threshold_tokens
356    }
357
358    async fn compact_history(&self, messages: Vec<Message>) -> Result<CompactionResult> {
359        let original_count = messages.len();
360        let original_tokens = self.estimate_tokens(&messages);
361
362        // Ensure we have enough messages to compact
363        if messages.len() <= self.config.retain_recent {
364            return Ok(CompactionResult {
365                messages,
366                original_count,
367                new_count: original_count,
368                original_tokens,
369                new_tokens: original_tokens,
370            });
371        }
372
373        // Split messages: old messages to summarize, recent messages to keep
374        let mut split_point = messages.len().saturating_sub(self.config.retain_recent);
375        split_point = Self::split_point_preserves_tool_pairs_with_cap(
376            &messages,
377            split_point,
378            MAX_RETAINED_TAIL_MESSAGE_TOKENS,
379        );
380
381        let (to_summarize, to_keep) = messages.split_at(split_point);
382
383        // Summarize old messages
384        let summary = self.compact(to_summarize).await?;
385
386        // Build new message history
387        let mut new_messages = Vec::with_capacity(2 + to_keep.len());
388
389        // Add summary as a user message
390        new_messages.push(Message::user(format!("{SUMMARY_PREFIX}{summary}")));
391
392        // Add acknowledgment from assistant only when some recent tail remains.
393        // If compaction drops the entire retained tail due to the token cap, ending
394        // the request with this synthetic assistant message would act like assistant
395        // prefill and Anthropic rejects that shape.
396        if !to_keep.is_empty() {
397            new_messages.push(Message::assistant(SUMMARY_ACKNOWLEDGMENT));
398        }
399
400        // Add recent messages
401        new_messages.extend(to_keep.iter().cloned());
402
403        let new_count = new_messages.len();
404        let new_tokens = self.estimate_tokens(&new_messages);
405
406        Ok(CompactionResult {
407            messages: new_messages,
408            original_count,
409            new_count,
410            original_tokens,
411            new_tokens,
412        })
413    }
414}
415
416#[cfg(test)]
417mod tests {
418    use super::*;
419    use crate::llm::{ChatResponse, StopReason, Usage};
420    use std::sync::Mutex;
421
    /// Test double for `LlmProvider`: replies with a canned summary and can
    /// optionally record the user prompts it receives.
    struct MockProvider {
        // Text returned as the single text block of every chat response.
        summary_response: String,
        // When set, the first text found in each request's messages is
        // appended here for later assertions.
        requests: Option<Arc<Mutex<Vec<String>>>>,
    }
426
427    impl MockProvider {
428        fn new(summary: &str) -> Self {
429            Self {
430                summary_response: summary.to_string(),
431                requests: None,
432            }
433        }
434
435        fn new_with_request_log(summary: &str, requests: Arc<Mutex<Vec<String>>>) -> Self {
436            Self {
437                summary_response: summary.to_string(),
438                requests: Some(requests),
439            }
440        }
441    }
442
443    #[async_trait]
444    impl LlmProvider for MockProvider {
445        async fn chat(&self, request: ChatRequest) -> Result<ChatOutcome> {
446            if let Some(requests) = &self.requests {
447                let mut entries = requests.lock().unwrap();
448                let user_prompt = request
449                    .messages
450                    .iter()
451                    .find_map(|message| match &message.content {
452                        Content::Text(text) => Some(text.clone()),
453                        Content::Blocks(blocks) => {
454                            let text = blocks
455                                .iter()
456                                .filter_map(|block| {
457                                    if let ContentBlock::Text { text } = block {
458                                        Some(text.as_str())
459                                    } else {
460                                        None
461                                    }
462                                })
463                                .collect::<Vec<_>>()
464                                .join("\n");
465                            if text.is_empty() { None } else { Some(text) }
466                        }
467                    })
468                    .unwrap_or_default();
469                entries.push(user_prompt);
470            }
471            Ok(ChatOutcome::Success(ChatResponse {
472                id: "test".to_string(),
473                content: vec![ContentBlock::Text {
474                    text: self.summary_response.clone(),
475                }],
476                model: "mock".to_string(),
477                stop_reason: Some(StopReason::EndTurn),
478                usage: Usage {
479                    input_tokens: 100,
480                    output_tokens: 50,
481                },
482            }))
483        }
484
485        fn model(&self) -> &'static str {
486            "mock-model"
487        }
488
489        fn provider(&self) -> &'static str {
490            "mock"
491        }
492    }
493
    #[test]
    fn test_needs_compaction_below_threshold() {
        // Message count (3) is under min_messages (5): the count gate alone
        // must veto compaction regardless of token usage.
        let provider = Arc::new(MockProvider::new("summary"));
        let config = CompactionConfig::default()
            .with_threshold_tokens(10_000)
            .with_min_messages(5);
        let compactor = LlmContextCompactor::new(provider, config);

        // Only 3 messages, below min_messages
        let messages = vec![
            Message::user("Hello"),
            Message::assistant("Hi"),
            Message::user("How are you?"),
        ];

        assert!(!compactor.needs_compaction(&messages));
    }

    #[test]
    fn test_needs_compaction_above_threshold() {
        // With a tiny token threshold and the count gate satisfied,
        // compaction should be requested.
        let provider = Arc::new(MockProvider::new("summary"));
        let config = CompactionConfig::default()
            .with_threshold_tokens(50) // Very low threshold
            .with_min_messages(3);
        let compactor = LlmContextCompactor::new(provider, config);

        // Messages that exceed threshold
        let messages = vec![
            Message::user("Hello, this is a longer message to test compaction"),
            Message::assistant(
                "Hi there! This is also a longer response to help trigger compaction",
            ),
            Message::user("Great, let's continue with even more text here"),
            Message::assistant("Absolutely, adding more content to ensure we exceed the threshold"),
        ];

        assert!(compactor.needs_compaction(&messages));
    }

    #[test]
    fn test_needs_compaction_auto_disabled() {
        // auto_compact = false must short-circuit needs_compaction even when
        // both the count and token thresholds would otherwise trigger it.
        let provider = Arc::new(MockProvider::new("summary"));
        let config = CompactionConfig::default()
            .with_threshold_tokens(10) // Very low
            .with_min_messages(1)
            .with_auto_compact(false);
        let compactor = LlmContextCompactor::new(provider, config);

        let messages = vec![
            Message::user("Hello, this is a longer message"),
            Message::assistant("Response here"),
        ];

        assert!(!compactor.needs_compaction(&messages));
    }
549
    #[tokio::test]
    async fn test_compact_history() -> Result<()> {
        // End-to-end: 6 messages with retain_recent = 2 should collapse to
        // summary + acknowledgment + the 2 retained messages.
        let provider = Arc::new(MockProvider::new(
            "User asked about Rust programming. Assistant explained ownership, borrowing, and lifetimes.",
        ));
        let config = CompactionConfig::default()
            .with_retain_recent(2)
            .with_min_messages(3);
        let compactor = LlmContextCompactor::new(provider, config);

        // Use longer messages to ensure compaction actually reduces tokens
        let messages = vec![
            Message::user(
                "What is Rust? I've heard it's a systems programming language but I don't know much about it. Can you explain the key features and why people are excited about it?",
            ),
            Message::assistant(
                "Rust is a systems programming language focused on safety, speed, and concurrency. It achieves memory safety without garbage collection through its ownership system. The key features include zero-cost abstractions, guaranteed memory safety, threads without data races, and minimal runtime.",
            ),
            Message::user(
                "Tell me about ownership in detail. How does it work and what are the rules? I want to understand this core concept thoroughly.",
            ),
            Message::assistant(
                "Ownership is Rust's central feature with three rules: each value has one owner, only one owner at a time, and the value is dropped when owner goes out of scope. This system prevents memory leaks, double frees, and dangling pointers at compile time.",
            ),
            Message::user("What about borrowing?"), // Keep
            Message::assistant("Borrowing allows references to data without taking ownership."), // Keep
        ];

        let result = compactor.compact_history(messages).await?;

        // Should have: summary message + ack + 2 recent messages = 4
        assert_eq!(result.new_count, 4);
        assert_eq!(result.original_count, 6);

        // With longer original messages, compaction should reduce tokens
        assert!(
            result.new_tokens < result.original_tokens,
            "Expected fewer tokens after compaction: new={} < original={}",
            result.new_tokens,
            result.original_tokens
        );

        // First message should be the summary
        if let Content::Text(text) = &result.messages[0].content {
            assert!(text.contains("Previous conversation summary"));
        }

        Ok(())
    }

    #[tokio::test]
    async fn test_compact_history_too_few_messages() -> Result<()> {
        // With fewer messages than retain_recent, compact_history must be a
        // no-op returning the original history untouched.
        let provider = Arc::new(MockProvider::new("summary"));
        let config = CompactionConfig::default().with_retain_recent(5);
        let compactor = LlmContextCompactor::new(provider, config);

        // Only 3 messages, less than retain_recent
        let messages = vec![
            Message::user("Hello"),
            Message::assistant("Hi"),
            Message::user("Bye"),
        ];

        let result = compactor.compact_history(messages.clone()).await?;

        // Should return original messages unchanged
        assert_eq!(result.new_count, 3);
        assert_eq!(result.messages.len(), 3);

        Ok(())
    }
621
    #[test]
    fn test_format_messages_for_summary() {
        // Plain-text messages render with "Role: text" labels.
        let messages = vec![Message::user("Hello"), Message::assistant("Hi there!")];

        let formatted = LlmContextCompactor::<MockProvider>::format_messages_for_summary(&messages);

        assert!(formatted.contains("User: Hello"));
        assert!(formatted.contains("Assistant: Hi there!"));
    }

    #[test]
    fn test_format_messages_for_summary_truncates_tool_results_unicode_safely() {
        // 600 two-byte chars: byte-indexed slicing at the 500 limit would
        // split a codepoint and panic; char-based truncation must not.
        let long_unicode = "é".repeat(600);

        let messages = vec![Message {
            role: Role::Assistant,
            content: Content::Blocks(vec![ContentBlock::ToolResult {
                tool_use_id: "tool-1".to_string(),
                content: long_unicode,
                is_error: Some(false),
            }]),
        }];

        let formatted = LlmContextCompactor::<MockProvider>::format_messages_for_summary(&messages);

        assert!(formatted.contains("... (truncated)"));
    }
649
    #[tokio::test]
    async fn test_compact_filters_summary_messages() -> Result<()> {
        // compact() must exclude messages that carry the summary prefix from
        // the prompt it sends to the LLM.
        let requests = Arc::new(Mutex::new(Vec::new()));
        let provider = Arc::new(MockProvider::new_with_request_log(
            "Fresh summary",
            requests.clone(),
        ));
        let config = CompactionConfig::default().with_min_messages(1);
        let compactor = LlmContextCompactor::new(provider, config);

        let messages = vec![
            Message::user(format!("{SUMMARY_PREFIX}already compacted context")),
            Message::assistant("Continue with the next task using this context."),
        ];

        let summary = compactor.compact(&messages).await?;

        {
            let recorded = requests.lock().unwrap();
            // Exactly one LLM call; its prompt contains the ordinary message
            // but not the prior summary's text.
            assert_eq!(recorded.len(), 1);
            assert_eq!(summary, "Fresh summary");
            assert!(recorded[0].contains("Continue with the next task using this context."));
            assert!(!recorded[0].contains("already compacted context"));
            drop(recorded);
        }

        Ok(())
    }

    #[tokio::test]
    async fn test_compact_history_ignores_prior_summary_in_candidate_payload() -> Result<()> {
        // Same filtering behavior, but exercised through compact_history:
        // the summarization request must omit the prior summary text.
        let requests = Arc::new(Mutex::new(Vec::new()));
        let provider = Arc::new(MockProvider::new_with_request_log(
            "Fresh history summary",
            requests.clone(),
        ));
        let config = CompactionConfig::default()
            .with_retain_recent(2)
            .with_min_messages(1);
        let compactor = LlmContextCompactor::new(provider, config);

        let messages = vec![
            Message::user(format!("{SUMMARY_PREFIX}already compacted context")),
            Message::assistant("Current turn content from the latest exchange."),
            Message::assistant("Recent message that should stay."),
            Message::user("Newest note that should stay."),
        ];

        let result = compactor.compact_history(messages).await?;

        {
            let recorded = requests.lock().unwrap();
            assert_eq!(recorded.len(), 1);
            assert!(recorded[0].contains("Current turn content from the latest exchange."));
            assert!(!recorded[0].contains("already compacted context"));
            drop(recorded);
        }
        // summary + ack + 2 retained = 4
        assert_eq!(result.new_count, 4);

        Ok(())
    }
711
    #[tokio::test]
    async fn test_compact_history_is_no_op_when_candidate_window_has_only_summaries() -> Result<()>
    {
        // When every message in the to-summarize window is itself a prior
        // summary, no LLM call is made and the fallback summary text is used.
        let requests = Arc::new(Mutex::new(Vec::new()));
        let provider = Arc::new(MockProvider::new_with_request_log(
            "This summary should not be used",
            requests.clone(),
        ));
        let config = CompactionConfig::default()
            .with_retain_recent(2)
            .with_min_messages(1);
        let compactor = LlmContextCompactor::new(provider, config);

        let messages = vec![
            Message::user(format!("{SUMMARY_PREFIX}first prior compacted section")),
            Message::assistant(format!("{SUMMARY_PREFIX}second prior compacted section")),
            Message::user(format!("{SUMMARY_PREFIX}third prior compacted section")),
            Message::assistant("final short note"),
        ];

        let result = compactor.compact_history(messages).await?;

        {
            // The mock's request log stayed empty: no LLM round-trip happened.
            let recorded = requests.lock().unwrap();
            assert!(recorded.is_empty());
            drop(recorded);
        }
        assert_eq!(result.new_count, 4);
        assert_eq!(result.messages.len(), 4);

        // The inserted summary message carries the static fallback text.
        if let Content::Text(text) = &result.messages[0].content {
            assert!(text.contains(COMPACT_EMPTY_SUMMARY));
        } else {
            panic!("Expected summary text in first message");
        }

        Ok(())
    }
750
    #[tokio::test]
    async fn test_compact_history_preserves_tool_use_tool_result_pairs() -> Result<()> {
        // Splitting between a tool_use and its tool_result would orphan the
        // result; the split point must shift backward to keep them together.
        let provider = Arc::new(MockProvider::new("Summary of earlier conversation."));
        let config = CompactionConfig::default()
            .with_retain_recent(2)
            .with_min_messages(3);
        let compactor = LlmContextCompactor::new(provider, config);

        // Build a history where the split_point (len - retain_recent = 5 - 2 = 3)
        // would land exactly on the user tool_result message at index 3,
        // which would orphan it from its assistant tool_use at index 2.
        let messages = vec![
            // index 0: user
            Message::user("What files are in the project?"),
            // index 1: assistant text
            Message::assistant("Let me check that for you."),
            // index 2: assistant with tool_use
            Message {
                role: Role::Assistant,
                content: Content::Blocks(vec![ContentBlock::ToolUse {
                    id: "tool_1".to_string(),
                    name: "list_files".to_string(),
                    input: serde_json::json!({}),
                    thought_signature: None,
                }]),
            },
            // index 3: user with tool_result (naive split would land here)
            Message {
                role: Role::User,
                content: Content::Blocks(vec![ContentBlock::ToolResult {
                    tool_use_id: "tool_1".to_string(),
                    content: "file1.rs\nfile2.rs".to_string(),
                    is_error: None,
                }]),
            },
            // index 4: assistant final response
            Message::assistant("The project contains file1.rs and file2.rs."),
        ];

        let result = compactor.compact_history(messages).await?;

        // The split_point should have been adjusted back from 3 to 2,
        // so to_keep includes: [assistant tool_use, user tool_result, assistant response]
        // Plus summary + ack = 5 total
        assert_eq!(result.new_count, 5);

        // Verify the kept messages include the tool_use/tool_result pair
        // After summary + ack, the third message should be the assistant with tool_use
        let kept_assistant = &result.messages[2];
        if let Content::Blocks(blocks) = &kept_assistant.content {
            assert!(
                blocks
                    .iter()
                    .any(|b| matches!(b, ContentBlock::ToolUse { .. })),
                "Expected assistant tool_use in kept messages"
            );
        } else {
            panic!("Expected Blocks content for assistant tool_use message");
        }

        // The fourth message should be the user tool_result
        let kept_user = &result.messages[3];
        if let Content::Blocks(blocks) = &kept_user.content {
            assert!(
                blocks
                    .iter()
                    .any(|b| matches!(b, ContentBlock::ToolResult { .. })),
                "Expected user tool_result in kept messages"
            );
        } else {
            panic!("Expected Blocks content for user tool_result message");
        }

        Ok(())
    }
826
827    #[tokio::test]
828    async fn test_compact_history_preserves_tool_result_tool_use_pairs() -> Result<()> {
829        let provider = Arc::new(MockProvider::new("Summary around tool pair."));
830        let config = CompactionConfig::default()
831            .with_retain_recent(2)
832            .with_min_messages(1);
833        let compactor = LlmContextCompactor::new(provider, config);
834
835        // Build a history where split_point would land on tool-use tool-result crossing in the
836        // opposite direction:
837        // ... user tool_result | assistant tool_use ...
838        let messages = vec![
839            Message::user("Start a workflow"),
840            Message {
841                role: Role::User,
842                content: Content::Blocks(vec![ContentBlock::ToolResult {
843                    tool_use_id: "tool_odd".to_string(),
844                    content: "prior result".to_string(),
845                    is_error: None,
846                }]),
847            },
848            Message {
849                role: Role::Assistant,
850                content: Content::Blocks(vec![ContentBlock::ToolUse {
851                    id: "tool_odd".to_string(),
852                    name: "follow_up".to_string(),
853                    input: serde_json::json!({}),
854                    thought_signature: None,
855                }]),
856            },
857            Message::assistant("Follow up done."),
858        ];
859
860        let result = compactor.compact_history(messages).await?;
861
862        // Split-point starts at 2 and is adjusted back to 1, keeping the tool result and tool use.
863        assert_eq!(result.new_count, 5);
864
865        // tool_result should remain with the kept tail.
866        let kept_result = &result.messages[2];
867        if let Content::Blocks(blocks) = &kept_result.content {
868            assert!(
869                blocks
870                    .iter()
871                    .any(|b| matches!(b, ContentBlock::ToolResult { .. })),
872                "Expected kept user tool_result in retained tail"
873            );
874        } else {
875            panic!("Expected tool_result blocks in retained tail");
876        }
877
878        // tool_use should remain with the kept tail.
879        let kept_tool_use = &result.messages[3];
880        if let Content::Blocks(blocks) = &kept_tool_use.content {
881            assert!(
882                blocks
883                    .iter()
884                    .any(|b| matches!(b, ContentBlock::ToolUse { .. })),
885                "Expected kept assistant tool_use in retained tail"
886            );
887        } else {
888            panic!("Expected tool_use blocks in retained tail");
889        }
890
891        Ok(())
892    }
893
894    #[tokio::test]
895    async fn test_compact_history_retained_tail_is_token_capped() -> Result<()> {
896        let provider = Arc::new(MockProvider::new(
897            "Project summary with a long context and technical context.",
898        ));
899        let config = CompactionConfig::default()
900            .with_retain_recent(8)
901            .with_min_messages(1)
902            .with_threshold_tokens(1);
903        let compactor = LlmContextCompactor::new(provider, config);
904
905        let mut messages = Vec::new();
906
907        // Older messages that will be summarized away.
908        messages.extend((0..6).map(|index| Message::user(format!("pre-compaction noise {index}"))));
909
910        // Newer long messages: intentionally large to force retained-tail truncation.
911        messages.extend(
912            (0..8).map(|index| Message::assistant(format!("kept-{index}: {}", "x".repeat(12_000)))),
913        );
914
915        let result = compactor.compact_history(messages).await?;
916
917        // The retained tail should be token capped and therefore shorter than retain_recent.
918        let retained_tail = &result.messages[2..];
919        assert!(retained_tail.len() < 8);
920
921        let mut latest_index = -1i32;
922        let mut all_retained = true;
923        for message in retained_tail {
924            if let Content::Text(text) = &message.content {
925                if let Some(number) = text.split(':').next().and_then(|prefix| {
926                    prefix
927                        .strip_prefix("kept-")
928                        .and_then(|rest| rest.parse::<i32>().ok())
929                }) {
930                    if number >= 0 {
931                        latest_index = latest_index.max(number);
932                    }
933                } else {
934                    all_retained = false;
935                }
936            } else {
937                all_retained = false;
938            }
939        }
940
941        assert!(all_retained);
942        assert_eq!(latest_index, 7);
943        assert!(
944            TokenEstimator::estimate_history(retained_tail) <= MAX_RETAINED_TAIL_MESSAGE_TOKENS
945        );
946        assert!(compactor.needs_compaction(&result.messages));
947
948        Ok(())
949    }
950
951    #[tokio::test]
952    async fn test_compact_history_skips_summary_ack_when_retained_tail_is_empty() -> Result<()> {
953        let provider = Arc::new(MockProvider::new("Summary for oversized user turn."));
954        let config = CompactionConfig::default()
955            .with_retain_recent(1)
956            .with_min_messages(1)
957            .with_threshold_tokens(1);
958        let compactor = LlmContextCompactor::new(provider, config);
959
960        let messages = vec![
961            Message::assistant("Earlier assistant context."),
962            Message::user(format!("oversized-user-turn: {}", "x".repeat(200_000))),
963        ];
964
965        let result = compactor.compact_history(messages).await?;
966
967        assert_eq!(result.new_count, 1);
968        assert_eq!(result.messages.len(), 1);
969
970        let only_message = &result.messages[0];
971        assert_eq!(only_message.role, Role::User);
972
973        if let Content::Text(text) = &only_message.content {
974            assert!(text.contains("Previous conversation summary"));
975            assert!(!text.contains(SUMMARY_ACKNOWLEDGMENT));
976        } else {
977            panic!("Expected summary text when retained tail is empty");
978        }
979
980        Ok(())
981    }
982}