1use crate::counter::{TiktokenTokenCounter, TokenCounter};
2use crate::limits::create_budget_for_model;
3use crate::{BudgetStrategy, TokenBudget};
4use bamboo_agent_core::MessagePhase;
5use bamboo_agent_core::{
6 CompressionEvent, CompressionTriggerType, ConversationSummary, Message, Session,
7};
8
9fn is_skill_tool_chain_message(message: &Message) -> bool {
11 message.tool_calls.as_ref().is_some_and(|calls| {
12 calls.iter().any(|call| {
13 matches!(
14 call.function.name.as_str(),
15 "load_skill" | "read_skill_resource"
16 )
17 })
18 })
19}
20use chrono::Utc;
21use std::collections::HashSet;
22
/// Reasons a compression plan could not be produced for a session.
#[derive(Debug, Clone)]
pub enum CompressionPlanError {
    /// Context usage has not yet crossed the configured trigger percentage.
    ExposureGateNotMet {
        usage_percent: f64,
        trigger_percent: u8,
    },
    /// Every message in the session is already compressed (or there are none).
    NoActiveMessages,
    /// Fewer than 3 non-system messages are active; compression needs at least 3.
    NotEnoughMessages { non_system_count: usize },
    /// After splitting at the anchor and honoring keep rules, no messages
    /// remained to summarize.
    NothingToCompress {
        anchor_index: usize,
        non_system_count: usize,
    },
}
41
42impl std::fmt::Display for CompressionPlanError {
43 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
44 match self {
45 Self::ExposureGateNotMet {
46 usage_percent,
47 trigger_percent,
48 } => write!(
49 f,
50 "compression threshold not reached (usage={:.1}%, trigger={}%)",
51 usage_percent, trigger_percent
52 ),
53 Self::NoActiveMessages => write!(f, "no active messages to compress"),
54 Self::NotEnoughMessages { non_system_count } => write!(
55 f,
56 "not enough non-system messages to compress ({}, need >=3)",
57 non_system_count
58 ),
59 Self::NothingToCompress {
60 anchor_index,
61 non_system_count,
62 } => write!(
63 f,
64 "nothing to compress after anchor/keep splitting (anchor_index={}, non_system={})",
65 anchor_index, non_system_count
66 ),
67 }
68 }
69}
70
/// Snapshot of current context-window pressure, used to decide whether the
/// compression tool should be exposed to the model.
#[derive(Debug, Clone)]
pub struct ContextCompressionExposure {
    /// Budget the estimate was computed against (configured or model-derived).
    pub budget: TokenBudget,
    /// Active (uncompressed) message tokens plus rendered-summary tokens.
    pub active_tokens: u32,
    /// Usage percentage used for gating (max of local estimate and any
    /// persisted usage snapshot).
    pub active_usage_percent: f64,
    /// `active_usage_percent` clamped to 0..=100 and rounded, for display.
    pub active_usage_percent_rounded: u8,
    /// True when the trigger threshold is crossed and there are at least 3
    /// non-system messages to compress.
    pub should_expose_tool: bool,
}
81
/// A fully computed compression plan: which messages to fold into the
/// summary plus the bookkeeping needed to record the compression event.
#[derive(Debug, Clone)]
pub struct CompressionPlan {
    /// IDs of the messages that will be marked compressed.
    pub compressed_message_ids: Vec<String>,
    /// The messages selected for summarization, in order.
    pub messages_to_summarize: Vec<Message>,
    /// Token count of the summary as rendered with its system envelope.
    pub summary_tokens: u32,
    /// The summary text supplied by the caller.
    pub summary_content: String,
    /// Context usage before applying the plan, in percent.
    pub active_usage_before_percent: f64,
    /// Projected context usage after applying the plan, in percent.
    pub active_usage_after_percent: f64,
    /// Trigger threshold from the budget, in percent.
    pub trigger_percent: u8,
    /// Target usage from the budget, in percent.
    pub target_percent: u8,
    /// Number of message segments removed (recorded on the event).
    pub segments_removed: usize,
    /// Trigger source recorded on the compression event (e.g. `Auto`).
    pub trigger_type: CompressionTriggerType,
    /// Compression-ratio metadata for the event; 0.0 when unknown.
    pub compression_ratio: f64,
    /// Summarizer model, when known.
    pub model_used: Option<String>,
    /// Summarization latency in milliseconds; 0 when not measured.
    pub latency_ms: u64,
}
100
/// Percentage of the context window consumed by `total_tokens`.
/// A zero-sized window reports 0.0 instead of dividing by zero.
pub fn context_window_usage_percent(total_tokens: u32, context_window_tokens: u32) -> f64 {
    match context_window_tokens {
        0 => 0.0,
        window => f64::from(total_tokens) / f64::from(window) * 100.0,
    }
}
107
/// Normalizes a configured trigger percentage into a usable `f64`.
/// A setting of 0 (disabled) or anything above 100 is treated as 100%.
pub fn normalized_trigger_percent(trigger_percent: u8) -> f64 {
    if (1..=100).contains(&trigger_percent) {
        f64::from(trigger_percent)
    } else {
        100.0
    }
}
115
/// Estimates how much of the model's context window the session currently
/// occupies and whether the compression tool should be exposed.
///
/// Usage is estimated locally from active (uncompressed) messages plus the
/// rendered conversation summary, then compared against any persisted
/// `token_usage` snapshot; the larger percentage wins so a stale local
/// estimate cannot hide real pressure.
pub fn estimate_context_compression_exposure(
    session: &Session,
    model_name: &str,
    configured_budget: Option<&TokenBudget>,
) -> ContextCompressionExposure {
    // Fall back to a model-derived budget when the caller supplies none.
    let budget = configured_budget
        .cloned()
        .unwrap_or_else(|| create_budget_for_model(model_name, BudgetStrategy::default()));
    let counter = TiktokenTokenCounter::default();
    let active_messages = active_messages_for_budget(session);
    let active_message_tokens = counter.count_messages(&active_messages);
    // Count the summary as it would actually be injected (with its
    // system-message envelope), not as raw text.
    let summary_tokens = session
        .conversation_summary
        .as_ref()
        .map(|summary| counter.count_messages(&[compression_summary_message(&summary.content)]))
        .unwrap_or(0);
    let active_tokens = active_message_tokens.saturating_add(summary_tokens);
    let context_window = budget.max_context_tokens;
    let estimated_usage = context_window_usage_percent(active_tokens, context_window);
    // Persisted usage may be more accurate; pick the best non-zero
    // denominator available (max_context_tokens, then budget_limit, then
    // the budget's own window).
    let usage = session
        .token_usage
        .as_ref()
        .and_then(|token_usage| {
            let denominator = if token_usage.max_context_tokens > 0 {
                token_usage.max_context_tokens
            } else if token_usage.budget_limit > 0 {
                token_usage.budget_limit
            } else {
                context_window
            };
            (denominator > 0).then_some(context_window_usage_percent(
                token_usage.total_tokens,
                denominator,
            ))
        })
        // Be pessimistic: trust whichever estimate reports higher pressure.
        .map(|persisted_usage| persisted_usage.max(estimated_usage))
        .unwrap_or(estimated_usage);

    let rounded = usage.clamp(0.0, 100.0).round() as u8;
    let trigger_tokens = budget.compression_trigger_context_tokens();
    let trigger_percent = if budget.max_context_tokens > 0 {
        (trigger_tokens as f64 / budget.max_context_tokens as f64) * 100.0
    } else {
        0.0
    };
    let threshold_reached = usage >= trigger_percent;

    // Compression needs at least 3 non-system messages to be worthwhile
    // (mirrors the check in the plan builder).
    let non_system_count = active_messages
        .iter()
        .filter(|m| !matches!(m.role, bamboo_agent_core::Role::System))
        .count();

    let should_expose_tool = threshold_reached && non_system_count >= 3;

    ContextCompressionExposure {
        budget,
        active_tokens,
        active_usage_percent: usage,
        active_usage_percent_rounded: rounded,
        should_expose_tool,
    }
}
187
/// Builds an automatic compression plan gated on the exposure threshold.
///
/// Fails with [`CompressionPlanError::ExposureGateNotMet`] unless context
/// usage has crossed the configured trigger percentage; the event is
/// recorded with trigger type `Auto`.
pub fn build_compression_plan_with_summary(
    session: &Session,
    model_name: &str,
    configured_budget: Option<&TokenBudget>,
    summary_content: String,
) -> Result<CompressionPlan, CompressionPlanError> {
    build_compression_plan_with_summary_internal(
        session,
        model_name,
        configured_budget,
        summary_content,
        true,
        CompressionTriggerType::Auto,
    )
}
205
/// Builds a compression plan that bypasses the exposure gate.
///
/// Used when compression must happen regardless of the trigger threshold;
/// the caller supplies the `trigger_type` to record on the event. Other
/// plan-building failures (too few messages, nothing to compress) still
/// apply.
pub fn build_forced_compression_plan_with_summary(
    session: &Session,
    model_name: &str,
    configured_budget: Option<&TokenBudget>,
    summary_content: String,
    trigger_type: CompressionTriggerType,
) -> Result<CompressionPlan, CompressionPlanError> {
    build_compression_plan_with_summary_internal(
        session,
        model_name,
        configured_budget,
        summary_content,
        false,
        trigger_type,
    )
}
227
/// Shared plan-building logic behind the auto and forced entry points.
///
/// Splits active non-system messages at an "anchor" (the earliest of the
/// last up-to-3 user messages), summarizes everything before it, and then
/// keeps moving unprotected kept messages into the summarize set until the
/// kept window fits the target token budget. Messages flagged
/// `never_compress` (or belonging to a skill tool chain) are always kept.
fn build_compression_plan_with_summary_internal(
    session: &Session,
    model_name: &str,
    configured_budget: Option<&TokenBudget>,
    summary_content: String,
    require_exposure_gate: bool,
    trigger_type: CompressionTriggerType,
) -> Result<CompressionPlan, CompressionPlanError> {
    let exposure = estimate_context_compression_exposure(session, model_name, configured_budget);
    if require_exposure_gate && !exposure.should_expose_tool {
        return Err(CompressionPlanError::ExposureGateNotMet {
            usage_percent: exposure.active_usage_percent,
            trigger_percent: exposure.budget.compression_trigger_percent,
        });
    }

    let budget = &exposure.budget;
    let counter = TiktokenTokenCounter::default();
    // Count the summary with its envelope, as it will actually be injected.
    let summary_message = compression_summary_message(&summary_content);
    let summary_tokens = counter.count_messages(&[summary_message]);

    let context_window = budget.max_context_tokens;
    let target_limit = budget.compression_target_context_tokens();

    let mut active_messages = active_messages_for_budget(session);
    if active_messages.is_empty() {
        tracing::debug!("compression plan: no active messages, cannot build plan");
        return Err(CompressionPlanError::NoActiveMessages);
    }

    // System messages and the summary are never compressed; reserve their
    // tokens so the remaining "window" budget is what kept messages may use.
    let system_messages: Vec<Message> = active_messages
        .iter()
        .filter(|m| matches!(m.role, bamboo_agent_core::Role::System))
        .cloned()
        .collect();
    let system_tokens = counter.count_messages(&system_messages);
    let reserved_non_window_tokens = system_tokens.saturating_add(summary_tokens);
    let window_limit = target_limit.saturating_sub(reserved_non_window_tokens);

    let non_system: Vec<Message> = active_messages
        .drain(..)
        .filter(|m| !matches!(m.role, bamboo_agent_core::Role::System))
        .collect();

    if non_system.len() < 3 {
        tracing::debug!(
            "compression plan: not enough non-system messages ({}), need at least 3",
            non_system.len()
        );
        return Err(CompressionPlanError::NotEnoughMessages {
            non_system_count: non_system.len(),
        });
    }

    // The anchor is the earliest of the last up-to-3 user messages;
    // everything before it is a summarization candidate.
    let user_indexes = non_system
        .iter()
        .enumerate()
        .filter_map(|(index, message)| {
            matches!(message.role, bamboo_agent_core::Role::User).then_some(index)
        })
        .collect::<Vec<_>>();
    let keep_user_count = user_indexes.len().min(3);
    let anchor_index = if keep_user_count > 0 {
        user_indexes[user_indexes.len() - keep_user_count]
    } else {
        // No user messages at all: fall back to the last message index.
        non_system
            .iter()
            .rposition(|m| matches!(m.role, bamboo_agent_core::Role::User))
            .unwrap_or(non_system.len().saturating_sub(1))
    };
    // The kept user messages are protected from the token-trimming loop below.
    let protected_user_ids: HashSet<String> = if keep_user_count > 0 {
        user_indexes[user_indexes.len() - keep_user_count..]
            .iter()
            .filter_map(|idx| non_system.get(*idx))
            .map(|message| message.id.clone())
            .collect()
    } else {
        HashSet::new()
    };

    tracing::debug!(
        "compression plan: context_window={}, target_limit={}, system_tokens={}, summary_tokens={}, window_limit={}, non_system_messages={}, keep_user_count={}, keep_from_index={}",
        context_window, target_limit, system_tokens, summary_tokens, window_limit, non_system.len(), keep_user_count, anchor_index
    );

    let mut messages_to_summarize = non_system[..anchor_index].to_vec();

    // Messages explicitly flagged never_compress, plus skill tool-chain
    // calls, are pulled out of the summarize set.
    let mut never_compress_ids: Vec<String> = messages_to_summarize
        .iter()
        .filter(|m| m.never_compress || is_skill_tool_chain_message(m))
        .map(|m| m.id.clone())
        .collect();

    // Also protect the tool-result messages that answer those skill calls,
    // so call/response pairs stay together.
    let skill_call_ids: Vec<String> = messages_to_summarize
        .iter()
        .filter(|m| is_skill_tool_chain_message(m))
        .flat_map(|m| m.tool_calls.iter().flatten().map(|c| c.id.clone()))
        .collect();
    if !skill_call_ids.is_empty() {
        for m in &*messages_to_summarize {
            if let Some(ref call_id) = m.tool_call_id {
                if skill_call_ids.contains(call_id) && !never_compress_ids.contains(&m.id) {
                    never_compress_ids.push(m.id.clone());
                }
            }
        }
    }

    if !never_compress_ids.is_empty() {
        messages_to_summarize.retain(|m| !never_compress_ids.contains(&m.id));
    }

    let non_system_count = non_system.len();
    let mut messages_to_keep = non_system[anchor_index..].to_vec();
    // Re-add protected messages (removed from the summarize set above) to
    // the keep set, avoiding duplicates.
    for id in &never_compress_ids {
        if let Some(msg) = non_system.iter().find(|m| &m.id == id) {
            if !messages_to_keep.iter().any(|m| m.id == *id) {
                messages_to_keep.push(msg.clone());
            }
        }
    }

    // Trim the keep set down to the window budget, moving the oldest
    // unprotected kept message into the summarize set each round.
    while !messages_to_keep.is_empty() {
        let keep_tokens = counter.count_messages(&messages_to_keep);
        if keep_tokens <= window_limit {
            break;
        }

        let Some(remove_index) = messages_to_keep.iter().position(|message| {
            !protected_user_ids.contains(message.id.as_str())
                && !never_compress_ids.contains(&message.id)
        }) else {
            // Everything left is protected; accept exceeding the limit.
            break;
        };
        let moved = messages_to_keep.remove(remove_index);
        messages_to_summarize.push(moved);
    }

    if messages_to_summarize.is_empty() {
        tracing::debug!(
            "compression plan: messages_to_summarize is empty after anchor/keep splitting"
        );
        return Err(CompressionPlanError::NothingToCompress {
            anchor_index,
            non_system_count,
        });
    }

    let compressed_message_ids = messages_to_summarize
        .iter()
        .map(|message| message.id.clone())
        .collect::<Vec<_>>();

    // Project post-compression usage from what will remain active.
    let keep_tokens = counter.count_messages(&messages_to_keep);
    let active_before = exposure.active_usage_percent;
    let active_after = if context_window == 0 {
        0.0
    } else {
        let after_total = reserved_non_window_tokens.saturating_add(keep_tokens);
        (after_total as f64 / context_window as f64) * 100.0
    };

    // Segment count is recorded on the compression event for diagnostics.
    let segmenter = crate::segmenter::MessageSegmenter::new();
    let segments_removed = segmenter.segment(messages_to_summarize.clone()).len();

    Ok(CompressionPlan {
        compressed_message_ids,
        messages_to_summarize,
        summary_tokens,
        summary_content,
        active_usage_before_percent: active_before,
        active_usage_after_percent: active_after,
        trigger_percent: budget.compression_trigger_percent,
        target_percent: budget.compression_target_percent,
        segments_removed,
        trigger_type,
        compression_ratio: 0.0,
        model_used: None,
        latency_ms: 0,
    })
}
421
422pub(super) fn extract_recently_modified_files(messages: &[Message]) -> Vec<(String, String)> {
425 let mut files = Vec::new();
426 for message in messages {
427 if let Some(ref tool_calls) = message.tool_calls {
428 for call in tool_calls {
429 let tool_name = call.function.name.as_str();
430 if !matches!(tool_name, "Write" | "Edit" | "Bash") {
431 continue;
432 }
433 let args = &call.function.arguments;
434 if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(args) {
435 if let Some(path) = parsed.get("file_path").and_then(|v| v.as_str()) {
436 files.push((path.to_string(), tool_name.to_string()));
437 } else if let Some(cmd) = parsed.get("command").and_then(|v| v.as_str()) {
438 for part in cmd.split_whitespace() {
440 if part.contains('/')
441 && (part.ends_with(".rs")
442 || part.ends_with(".ts")
443 || part.ends_with(".js")
444 || part.ends_with(".toml")
445 || part.ends_with(".json")
446 || part.ends_with(".md"))
447 {
448 files.push((part.to_string(), "Bash".to_string()));
449 }
450 }
451 }
452 }
453 }
454 }
455 }
456 files.truncate(10);
457 files
458}
459
460pub(super) fn extract_key_decisions(messages: &[Message], limit: usize) -> Vec<String> {
462 let decision_keywords = [
463 "decided to",
464 "approach is",
465 "use ",
466 "using ",
467 "we'll go with",
468 "the plan is",
469 "strategy:",
470 "solution:",
471 "chose to",
472 "switched to",
473 "refactored to",
474 "migrated to",
475 "replaced with",
476 ];
477 let mut decisions = Vec::new();
478 for message in messages {
479 if !matches!(message.role, bamboo_agent_core::Role::Assistant) {
480 continue;
481 }
482 let content = &message.content;
483 for line in content.lines() {
484 let line_lower = line.to_lowercase();
485 if decision_keywords.iter().any(|kw| line_lower.contains(kw)) {
486 let truncated: String = line.chars().take(200).collect();
487 decisions.push(truncated);
488 if decisions.len() >= limit {
489 return decisions;
490 }
491 }
492 }
493 }
494 decisions
495}
496
497fn build_post_compaction_recovery_message(
501 compressed_messages: &[Message],
502 session: &Session,
503) -> Option<Message> {
504 if compressed_messages.is_empty() {
505 return None;
506 }
507
508 let mut sections = Vec::new();
509
510 let files = extract_recently_modified_files(compressed_messages);
512 if !files.is_empty() {
513 let mut section = String::from("## Recently Modified Files\n");
514 for (path, tool) in &files {
515 section.push_str(&format!("- {} ({})\n", path, tool));
516 }
517 sections.push(section);
518 }
519
520 if let Some(ref task_list) = session.task_list {
522 let active_items: Vec<_> = task_list
523 .items
524 .iter()
525 .filter(|item| !matches!(item.status, bamboo_domain::TaskItemStatus::Completed))
526 .collect();
527 if !active_items.is_empty() {
528 let mut section = String::from("## Active Tasks\n");
529 for item in active_items.iter().take(10) {
530 section.push_str(&format!("- [{:?}] {}\n", item.status, item.description));
531 }
532 sections.push(section);
533 }
534 }
535
536 let decisions = extract_key_decisions(compressed_messages, 5);
538 if !decisions.is_empty() {
539 let mut section = String::from("## Key Decisions\n");
540 for decision in &decisions {
541 section.push_str(&format!("- {}\n", decision));
542 }
543 sections.push(section);
544 }
545
546 if sections.is_empty() {
547 return None;
548 }
549
550 let mut content = String::from("[post-compaction-recovery]\nContext extracted from compressed messages for continued work.\n\n");
551 content.push_str(§ions.join("\n"));
552
553 let mut message = Message::assistant(content, None);
554 message.never_compress = true;
555 Some(message)
556}
557
/// Coverage scores (0.0..=1.0) describing how well a summary retained the
/// files and decisions found in the messages it replaced.
struct SummaryQualityMetrics {
    // Fraction of extracted file paths mentioned verbatim in the summary.
    file_coverage: f64,
    // Fraction of extracted decision lines whose prefix appears in the summary.
    decision_coverage: f64,
}
562
563fn validate_summary_quality(summary: &str, messages: &[Message]) -> SummaryQualityMetrics {
564 let files = extract_recently_modified_files(messages);
565 let decisions = extract_key_decisions(messages, 10);
566
567 let files_mentioned = files
568 .iter()
569 .filter(|(path, _)| summary.contains(path.as_str()))
570 .count();
571 let file_coverage = if files.is_empty() {
572 1.0
573 } else {
574 files_mentioned as f64 / files.len() as f64
575 };
576
577 let decisions_mentioned = decisions
578 .iter()
579 .filter(|d| {
580 let check_len = d.len().min(50);
581 summary.contains(&d[..check_len])
582 })
583 .count();
584 let decision_coverage = if decisions.is_empty() {
585 1.0
586 } else {
587 decisions_mentioned as f64 / decisions.len() as f64
588 };
589
590 SummaryQualityMetrics {
591 file_coverage,
592 decision_coverage,
593 }
594}
595
/// Applies a previously built [`CompressionPlan`] to the session in place.
///
/// Marks the planned messages compressed, records a `CompressionEvent`,
/// replaces the conversation summary, inserts a post-compaction recovery
/// note, warns when the summary covers the source material poorly, and
/// rebuilds the persisted token-usage snapshot.
///
/// Returns the number of message ids the plan covered; returns 0 (leaving
/// the session untouched) when nothing new was compressed.
pub fn apply_compression_plan(session: &mut Session, plan: CompressionPlan) -> usize {
    let compressed_ids: HashSet<&str> = plan
        .compressed_message_ids
        .iter()
        .map(String::as_str)
        .collect();

    // Flag every planned, not-yet-compressed message; remember its index.
    let mut changed_indexes = Vec::new();
    for (index, message) in session.messages.iter_mut().enumerate() {
        if message.compressed || !compressed_ids.contains(message.id.as_str()) {
            continue;
        }
        message.compressed = true;
        changed_indexes.push(index);
    }

    // Nothing newly compressed: avoid recording an empty event.
    if changed_indexes.is_empty() {
        return 0;
    }

    let event = CompressionEvent::new(
        changed_indexes.len(),
        plan.segments_removed,
        plan.active_usage_before_percent,
        plan.active_usage_after_percent,
        plan.summary_tokens,
        plan.trigger_type,
        plan.compression_ratio,
        plan.model_used.clone(),
        plan.latency_ms,
    );
    let event_id = event.id.clone();
    // Back-link each compressed message to the event for auditability.
    for index in changed_indexes {
        session.messages[index].compressed_by_event_id = Some(event_id.clone());
    }
    session.compression_events.push(event);
    session.conversation_summary = Some(ConversationSummary::new(
        &plan.summary_content,
        plan.compressed_message_ids.len(),
        plan.summary_tokens,
    ));

    // Re-collect the now-compressed messages for recovery/quality analysis.
    let compressed_messages: Vec<Message> = session
        .messages
        .iter()
        .filter(|m| compressed_ids.contains(m.id.as_str()))
        .cloned()
        .collect();
    if let Some(recovery) = build_post_compaction_recovery_message(&compressed_messages, session) {
        // Insert the recovery note right after the last uncompressed user
        // message, or append when no such message exists.
        let insert_pos = session
            .messages
            .iter()
            .rposition(|m| matches!(m.role, bamboo_agent_core::Role::User) && !m.compressed)
            .map(|pos| pos + 1)
            .unwrap_or(session.messages.len());
        session.messages.insert(insert_pos, recovery);
    }

    // Heuristic quality gate: warn when the summary misses most files or
    // decisions found in the compressed span.
    let quality = validate_summary_quality(&plan.summary_content, &compressed_messages);
    if quality.file_coverage < 0.5 || quality.decision_coverage < 0.3 {
        tracing::warn!(
            "[{}] Summary quality: file_coverage={:.0}%, decision_coverage={:.0}%",
            session.id,
            quality.file_coverage * 100.0,
            quality.decision_coverage * 100.0
        );
    }

    // Recompute the token-usage snapshot from what remains active so later
    // exposure estimates see the post-compression state.
    let counter = TiktokenTokenCounter::default();
    let remaining_active: Vec<_> = session
        .messages
        .iter()
        .filter(|m| !m.compressed)
        .cloned()
        .collect();
    let system_msgs: Vec<_> = remaining_active
        .iter()
        .filter(|m| matches!(m.role, bamboo_agent_core::Role::System))
        .cloned()
        .collect();
    let window_msgs: Vec<_> = remaining_active
        .iter()
        .filter(|m| !matches!(m.role, bamboo_agent_core::Role::System))
        .cloned()
        .collect();
    let system_tokens = counter.count_messages(&system_msgs);
    let new_summary_tokens = plan.summary_tokens;
    let window_tokens = counter.count_messages(&window_msgs);
    let total_tokens = system_tokens
        .saturating_add(new_summary_tokens)
        .saturating_add(window_tokens);
    // Carry the previous limits forward; 0 when no usage was recorded yet.
    let previous_usage = session.token_usage.take();
    let budget_limit = previous_usage
        .as_ref()
        .map(|u| {
            if u.max_context_tokens > 0 {
                u.max_context_tokens
            } else {
                u.budget_limit
            }
        })
        .unwrap_or(0);
    let max_context_tokens = previous_usage
        .as_ref()
        .map(|u| u.max_context_tokens)
        .unwrap_or(0);
    session.token_usage = Some(bamboo_agent_core::TokenBudgetUsage {
        system_tokens,
        summary_tokens: new_summary_tokens,
        window_tokens,
        total_tokens,
        max_context_tokens,
        budget_limit,
        truncation_occurred: false,
        segments_removed: 0,
        prompt_cached_tool_outputs: 0,
        thinking_tokens: 0,
        cache_read_input_tokens: 0,
    });

    session.updated_at = Utc::now();
    plan.compressed_message_ids.len()
}
727
728pub fn compression_summary_message(summary_content: &str) -> Message {
729 Message::system(format!(
730 "<!-- CONVERSATION_SUMMARY_START -->\n\
731 ## Previous Conversation Summary\n\
732 The following is compressed historical context for continuity only.\n\
733 It is background memory, not a new user request. Follow the current task list and recent messages over this summary when they conflict.\n\n\
734 {}\n\
735 <!-- CONVERSATION_SUMMARY_END -->",
736 summary_content
737 ))
738}
739
740pub fn active_messages_for_budget(session: &Session) -> Vec<Message> {
741 session
742 .messages
743 .iter()
744 .filter(|message| !message.compressed)
745 .cloned()
746 .collect()
747}
748
749pub fn summary_source_messages(session: &Session) -> Vec<Message> {
750 session
751 .messages
752 .iter()
753 .filter(|message| !message.compressed)
754 .filter(|message| !matches!(message.role, bamboo_agent_core::Role::System))
755 .cloned()
756 .collect()
757}
758
/// Renders the instruction prompt sent to the summarizer model.
///
/// The prompt contains, in order: the compression instructions, the
/// existing summary (when non-empty), the current task list, the required
/// output sections, and each message to compress with its role label,
/// tool-call metadata, and content truncated to 2000 characters.
pub fn build_summary_prompt(
    session: &Session,
    messages: &[Message],
    existing_summary: Option<&str>,
) -> String {
    let mut content = String::new();
    content.push_str(
        "You are compressing conversation history for continued work. Produce a compact but reliable working-memory summary.\n\n",
    );
    content.push_str(
        "Critical requirements:\n- First capture the in-flight work right before compression (what was being done, where, and with which tool/file)\n- Distinguish clearly between ACTIVE work, COMPLETED work, and OBSOLETE or superseded work\n- Do not restate old tasks as active unless they are still unresolved\n- The current task list is the source of truth for what is actively being worked on\n- Preserve constraints, decisions, file paths, code changes, errors, tool findings, blockers, and the next step\n- If earlier plans conflict with the current task list or newer messages, treat the earlier plans as obsolete or completed\n- Explicitly evaluate each clear user requirement (e.g. requirement 1, requirement 2) with a status and evidence\n- Return only summary text in the same language as the conversation\n\n",
    );

    // Feed the previous summary back in so it can be merged, not lost.
    if let Some(existing) = existing_summary.map(str::trim).filter(|s| !s.is_empty()) {
        content.push_str("## Existing Summary\n");
        content.push_str(existing);
        content.push_str("\n\n");
    }

    // The task list anchors what is currently active.
    let task_list_prompt = session.format_task_list_for_prompt();
    if !task_list_prompt.trim().is_empty() {
        content.push_str("## Current Task List\n");
        content.push_str(task_list_prompt.trim());
        content.push_str("\n\n");
    }

    content.push_str(
        "## Required Output Sections\n1. Pre-compression in-flight work (what was being done immediately before compression)\n2. Current active objective\n3. Requirement checklist (Requirement | Status: completed/in_progress/pending/blocked/obsolete | Evidence)\n4. Active tasks\n5. Completed tasks\n6. Obsolete or superseded tasks\n7. Important context and constraints\n8. Files, code, and tool findings\n9. Open issues and next step\n\n",
    );

    content.push_str("## Messages To Compress\n\n");
    for message in messages {
        // System messages are configuration, not history: skip them.
        let role = match message.role {
            bamboo_agent_core::Role::System => continue,
            bamboo_agent_core::Role::User => "User",
            bamboo_agent_core::Role::Assistant => match message.phase {
                Some(MessagePhase::Commentary) => "Assistant Commentary",
                Some(MessagePhase::FinalAnswer) => "Assistant Final",
                None => "Assistant",
            },
            bamboo_agent_core::Role::Tool => "Tool Result",
        };

        content.push_str("### ");
        content.push_str(role);
        content.push('\n');
        // Surface which tools were called so findings stay attached to the
        // right step in the summary.
        if let Some(tool_calls) = &message.tool_calls {
            if !tool_calls.is_empty() {
                let names = tool_calls
                    .iter()
                    .map(|call| call.function.name.as_str())
                    .collect::<Vec<_>>()
                    .join(", ");
                content.push_str("Called tools: ");
                content.push_str(&names);
                content.push('\n');
            }
        }
        if let Some(tool_call_id) = &message.tool_call_id {
            content.push_str("Tool call id: ");
            content.push_str(tool_call_id);
            content.push('\n');
        }
        // Cap each message body at 2000 characters to bound prompt size.
        let snippet = truncate_chars(&message.content, 2000);
        content.push_str(&snippet);
        content.push_str("\n\n");
    }

    content.push_str(
        "Return only the summary text. Be explicit about what is active now versus what is already done or no longer relevant.",
    );
    content
}
832
/// Truncates `value` to at most `max_chars` characters (not bytes),
/// appending "..." when anything was cut off.
///
/// Single pass via `char_indices` instead of the previous two passes
/// (`chars().count()` followed by `chars().take().collect()`).
fn truncate_chars(value: &str, max_chars: usize) -> String {
    match value.char_indices().nth(max_chars) {
        // Fewer than or exactly `max_chars` characters: return as-is.
        None => value.to_string(),
        // `byte_index` is the start of the first character past the limit,
        // so slicing at it is always a valid char boundary.
        Some((byte_index, _)) => format!("{}...", &value[..byte_index]),
    }
}
839
840#[cfg(test)]
841mod tests {
842 use super::*;
843 use bamboo_agent_core::TokenBudgetUsage;
844 use bamboo_domain::{FunctionCall, TaskItem, TaskItemStatus, TaskList, ToolCall};
845 use chrono::Utc;
846
    /// Budget with a tiny 1000-token context window so a handful of short
    /// messages creates measurable pressure (trigger 50%, target 20%).
    fn make_budget() -> TokenBudget {
        TokenBudget {
            max_context_tokens: 1000,
            max_output_tokens: 100,
            strategy: BudgetStrategy::Hybrid {
                window_size: 20,
                enable_summarization: true,
            },
            safety_margin: 0,
            compression_trigger_percent: 50,
            compression_target_percent: 20,
            working_reserve_tokens: 0,
            fallback_trigger_percent: 75,
            prompt_cache_min_tool_output_chars: 1_200,
            prompt_cache_head_chars: 280,
            prompt_cache_tail_chars: 180,
            prompt_cache_recent_user_turns: 2,
            prompt_cache_recent_tool_chains: 2,
            max_tool_output_tokens: 0,
        }
    }
868
    /// Session with one system message plus three user/assistant pairs of
    /// repeated filler text, sized against the 1000-token test budget.
    fn make_session_with_pressure() -> Session {
        let mut session = Session::new("compression-hysteresis", "gpt-4o-mini");
        session.token_budget = Some(make_budget());
        session.add_message(Message::system("system"));
        for i in 0..3 {
            session.add_message(Message::user(format!(
                "User message {i}: {}",
                "alpha beta gamma delta epsilon ".repeat(2)
            )));
            session.add_message(Message::assistant(
                format!(
                    "Assistant message {i}: {}",
                    "work log decisions next steps ".repeat(2)
                ),
                None,
            ));
        }
        session
    }
888
    /// A zero-sized window reports 0% instead of dividing by zero.
    #[test]
    fn context_window_usage_percent_uses_context_window_denominator() {
        assert_eq!(context_window_usage_percent(0, 0), 0.0);
        assert_eq!(context_window_usage_percent(500, 1000), 50.0);
    }
894
    /// Lowering the trigger to 10% makes the filler session cross the gate,
    /// so the compression tool should be exposed.
    #[test]
    fn estimate_context_compression_exposure_crosses_trigger_when_usage_is_high_enough() {
        let mut session = make_session_with_pressure();
        if let Some(budget) = session.token_budget.as_mut() {
            budget.compression_trigger_percent = 10;
        }
        let exposure = estimate_context_compression_exposure(
            &session,
            "gpt-4o-mini",
            session.token_budget.as_ref(),
        );
        assert!(exposure.active_usage_percent >= 10.0);
        assert!(exposure.should_expose_tool);
    }
909
    /// With the trigger raised to 99%, the same filler session stays below
    /// the gate and the tool must not be exposed.
    #[test]
    fn estimate_context_compression_exposure_stays_below_trigger_when_usage_is_low() {
        let mut session = make_session_with_pressure();
        if let Some(budget) = session.token_budget.as_mut() {
            budget.compression_trigger_percent = 99;
        }

        let exposure = estimate_context_compression_exposure(
            &session,
            "gpt-4o-mini",
            session.token_budget.as_ref(),
        );

        assert!(exposure.active_usage_percent < 99.0);
        assert!(!exposure.should_expose_tool);
    }
926
    /// The summary prompt must embed the current task list, the required
    /// output sections, and any previously existing summary text.
    #[test]
    fn build_summary_prompt_includes_task_list_and_state_sections() {
        let mut session = Session::new("summary-prompt", "gpt-4o-mini");
        session.set_task_list(TaskList {
            session_id: session.id.clone(),
            title: "Task List".to_string(),
            items: vec![
                TaskItem {
                    id: "task_1".to_string(),
                    description: "检查 51% 又回落到 50% 的触发逻辑".to_string(),
                    status: TaskItemStatus::InProgress,
                    depends_on: Vec::new(),
                    notes: "避免刚压缩完又立刻再次压缩".to_string(),
                    ..TaskItem::default()
                },
                TaskItem {
                    id: "task_2".to_string(),
                    description: "重写 summarizer prompt 并纳入 task list".to_string(),
                    status: TaskItemStatus::Pending,
                    depends_on: Vec::new(),
                    notes: String::new(),
                    ..TaskItem::default()
                },
            ],
            created_at: Utc::now(),
            updated_at: Utc::now(),
        });
        let prompt = build_summary_prompt(
            &session,
            &[
                Message::user("继续修复 context compression"),
                Message::assistant("先分析 trigger / target / summary", None),
            ],
            Some("old summary"),
        );

        assert!(prompt.contains("## Current Task List"));
        assert!(prompt.contains("Current active objective"));
        assert!(prompt.contains("Requirement checklist"));
        assert!(prompt.contains("Active tasks"));
        assert!(prompt.contains("Completed tasks"));
        assert!(prompt.contains("Obsolete or superseded tasks"));
        assert!(prompt.contains("检查 51% 又回落到 50% 的触发逻辑"));
        assert!(prompt.contains("old summary"));
    }
972
    /// A forced plan must anchor at the third-from-last user message: U3,
    /// U4 and U5 stay active while earlier traffic is summarized.
    #[test]
    fn forced_plan_keeps_last_three_user_messages_active() {
        let budget = TokenBudget {
            max_context_tokens: 1200,
            max_output_tokens: 100,
            strategy: BudgetStrategy::Hybrid {
                window_size: 20,
                enable_summarization: true,
            },
            safety_margin: 0,
            compression_trigger_percent: 80,
            compression_target_percent: 20,
            working_reserve_tokens: 0,
            fallback_trigger_percent: 75,
            prompt_cache_min_tool_output_chars: 1_200,
            prompt_cache_head_chars: 280,
            prompt_cache_tail_chars: 180,
            prompt_cache_recent_user_turns: 2,
            prompt_cache_recent_tool_chains: 2,
            max_tool_output_tokens: 0,
        };
        let mut session = Session::new("keep-last-three-user-turns", "gpt-4o-mini");
        session.token_budget = Some(budget.clone());
        session.add_message(Message::system("system"));
        // Six user/assistant pairs so there is history before the anchor.
        for i in 0..6 {
            session.add_message(Message::user(format!(
                "U{i}: {}",
                "alpha beta gamma ".repeat(8)
            )));
            session.add_message(Message::assistant(
                format!("A{i}: {}", "analysis plan steps ".repeat(8)),
                None,
            ));
        }

        let plan = build_forced_compression_plan_with_summary(
            &session,
            "gpt-4o-mini",
            Some(&budget),
            "summary".to_string(),
            CompressionTriggerType::CriticalOverflow,
        )
        .expect("forced plan should build");

        // Kept = non-system user messages whose ids are not in the plan.
        let compressed_ids = plan
            .compressed_message_ids
            .iter()
            .map(String::as_str)
            .collect::<HashSet<_>>();
        let kept_user_contents = session
            .messages
            .iter()
            .filter(|message| !matches!(message.role, bamboo_agent_core::Role::System))
            .filter(|message| !compressed_ids.contains(message.id.as_str()))
            .filter(|message| matches!(message.role, bamboo_agent_core::Role::User))
            .map(|message| message.content.clone())
            .collect::<Vec<_>>();

        assert!(
            kept_user_contents.len() >= 3,
            "expected to keep at least 3 user messages, got {}",
            kept_user_contents.len()
        );
        assert!(kept_user_contents
            .iter()
            .any(|content| content.starts_with("U3:")));
        assert!(kept_user_contents
            .iter()
            .any(|content| content.starts_with("U4:")));
        assert!(kept_user_contents
            .iter()
            .any(|content| content.starts_with("U5:")));
    }
1046
    /// When the persisted `token_usage` snapshot reports far higher usage
    /// than the local estimate (tiny messages vs. 96k recorded tokens), the
    /// persisted figure must drive exposure.
    #[test]
    fn estimate_exposure_prefers_persisted_budget_usage_when_higher() {
        let mut session = Session::new("persisted-usage", "gpt-4o-mini");
        session.token_budget = Some(TokenBudget {
            max_context_tokens: 100_000,
            max_output_tokens: 1_000,
            strategy: BudgetStrategy::Hybrid {
                window_size: 20,
                enable_summarization: true,
            },
            safety_margin: 0,
            compression_trigger_percent: 80,
            compression_target_percent: 50,
            working_reserve_tokens: 0,
            fallback_trigger_percent: 75,
            prompt_cache_min_tool_output_chars: 1_200,
            prompt_cache_head_chars: 280,
            prompt_cache_tail_chars: 180,
            prompt_cache_recent_user_turns: 2,
            prompt_cache_recent_tool_chains: 2,
            max_tool_output_tokens: 0,
        });
        session.add_message(Message::system("system"));
        session.add_message(Message::user("short"));
        session.add_message(Message::assistant("short", None));
        session.add_message(Message::user("follow-up"));
        session.add_message(Message::assistant("reply", None));
        // Persisted snapshot says 96k of 100k tokens are already used.
        session.token_usage = Some(TokenBudgetUsage {
            system_tokens: 100,
            summary_tokens: 0,
            window_tokens: 95_900,
            total_tokens: 96_000,
            max_context_tokens: 100_000,
            budget_limit: 10_000,
            truncation_occurred: true,
            segments_removed: 12,
            prompt_cached_tool_outputs: 0,
            thinking_tokens: 0,
            cache_read_input_tokens: 0,
        });

        let exposure = estimate_context_compression_exposure(
            &session,
            "gpt-4o-mini",
            session.token_budget.as_ref(),
        );

        assert!(
            exposure.active_usage_percent >= 96.0,
            "expected persisted context-window usage to drive exposure, got {}",
            exposure.active_usage_percent
        );
        assert!(exposure.should_expose_tool);
    }
1101
1102 #[test]
1103 fn never_compress_messages_are_excluded_from_summarize_set() {
1104 let budget = TokenBudget {
1105 max_context_tokens: 1200,
1106 max_output_tokens: 100,
1107 strategy: BudgetStrategy::Hybrid {
1108 window_size: 20,
1109 enable_summarization: true,
1110 },
1111 safety_margin: 0,
1112 compression_trigger_percent: 80,
1113 compression_target_percent: 20,
1114 working_reserve_tokens: 0,
1115 fallback_trigger_percent: 75,
1116 prompt_cache_min_tool_output_chars: 1_200,
1117 prompt_cache_head_chars: 280,
1118 prompt_cache_tail_chars: 180,
1119 prompt_cache_recent_user_turns: 2,
1120 prompt_cache_recent_tool_chains: 2,
1121 max_tool_output_tokens: 0,
1122 };
1123 let mut session = Session::new("never-compress-test", "gpt-4o-mini");
1124 session.token_budget = Some(budget.clone());
1125 session.add_message(Message::system("system"));
1126
1127 session.add_message(Message::user("Old question about X"));
1129 session.add_message(Message::assistant("Old answer about X", None));
1130
1131 let mut protected = Message::user("Critical context that must survive");
1133 protected.never_compress = true;
1134 session.add_message(protected);
1135 session.add_message(Message::assistant("Response to critical", None));
1136
1137 for i in 0..4 {
1139 session.add_message(Message::user(format!(
1140 "Recent U{i}: {}",
1141 "padding text to fill budget ".repeat(6)
1142 )));
1143 session.add_message(Message::assistant(
1144 format!("Recent A{i}: {}", "reply padding text ".repeat(6)),
1145 None,
1146 ));
1147 }
1148
1149 let plan = build_forced_compression_plan_with_summary(
1150 &session,
1151 "gpt-4o-mini",
1152 Some(&budget),
1153 "summary".to_string(),
1154 CompressionTriggerType::Auto,
1155 )
1156 .expect("plan should build");
1157
1158 let compressed_ids: HashSet<&str> = plan
1159 .compressed_message_ids
1160 .iter()
1161 .map(String::as_str)
1162 .collect();
1163
1164 let protected_msg = session
1166 .messages
1167 .iter()
1168 .find(|m| m.never_compress)
1169 .expect("should find the protected message");
1170
1171 assert!(
1172 !compressed_ids.contains(protected_msg.id.as_str()),
1173 "never_compress message should NOT be in the compressed set"
1174 );
1175 }
1176
1177 #[test]
1178 fn skill_tool_chain_messages_are_protected_from_compression() {
1179 let budget = TokenBudget {
1180 max_context_tokens: 1200,
1181 max_output_tokens: 100,
1182 strategy: BudgetStrategy::Hybrid {
1183 window_size: 20,
1184 enable_summarization: true,
1185 },
1186 safety_margin: 0,
1187 compression_trigger_percent: 80,
1188 compression_target_percent: 20,
1189 working_reserve_tokens: 0,
1190 fallback_trigger_percent: 75,
1191 prompt_cache_min_tool_output_chars: 1_200,
1192 prompt_cache_head_chars: 280,
1193 prompt_cache_tail_chars: 180,
1194 prompt_cache_recent_user_turns: 2,
1195 prompt_cache_recent_tool_chains: 2,
1196 max_tool_output_tokens: 0,
1197 };
1198 let mut session = Session::new("skill-chain-test", "gpt-4o-mini");
1199 session.token_budget = Some(budget.clone());
1200 session.add_message(Message::system("system"));
1201
1202 let mut skill_call = Message::assistant(String::new(), None);
1204 skill_call.tool_calls = Some(vec![ToolCall {
1205 id: "tc-skill".to_string(),
1206 tool_type: "function".to_string(),
1207 function: FunctionCall {
1208 name: "load_skill".to_string(),
1209 arguments: r#"{"skill_id":"my-skill"}"#.to_string(),
1210 },
1211 }]);
1212 session.add_message(skill_call);
1213
1214 let mut skill_result = Message::tool_result("tc-skill", "skill loaded");
1215 skill_result.tool_success = Some(true);
1216 session.add_message(skill_result);
1217
1218 for i in 0..6 {
1220 session.add_message(Message::user(format!(
1221 "U{i}: {}",
1222 "alpha beta gamma delta ".repeat(8)
1223 )));
1224 session.add_message(Message::assistant(
1225 format!("A{i}: {}", "analysis steps plan ".repeat(8)),
1226 None,
1227 ));
1228 }
1229
1230 let plan = build_forced_compression_plan_with_summary(
1231 &session,
1232 "gpt-4o-mini",
1233 Some(&budget),
1234 "summary".to_string(),
1235 CompressionTriggerType::Auto,
1236 )
1237 .expect("plan should build");
1238
1239 let compressed_ids: HashSet<&str> = plan
1240 .compressed_message_ids
1241 .iter()
1242 .map(String::as_str)
1243 .collect();
1244
1245 let skill_messages: Vec<&Message> = session
1247 .messages
1248 .iter()
1249 .filter(|m| {
1250 m.tool_calls
1251 .as_ref()
1252 .is_some_and(|calls| calls.iter().any(|c| c.function.name == "load_skill"))
1253 || m.tool_call_id.as_deref() == Some("tc-skill")
1254 })
1255 .collect();
1256
1257 for msg in &skill_messages {
1258 assert!(
1259 !compressed_ids.contains(msg.id.as_str()),
1260 "skill tool chain message {} should NOT be compressed",
1261 msg.id
1262 );
1263 }
1264 }
1265
1266 #[test]
1267 fn recovery_message_returns_none_for_empty_messages() {
1268 let session = Session::new("recovery-empty", "model");
1269 let result = build_post_compaction_recovery_message(&[], &session);
1270 assert!(result.is_none());
1271 }
1272
1273 #[test]
1274 fn recovery_message_has_never_compress_flag() {
1275 let mut session = Session::new("recovery-flag", "model");
1276 let messages = vec![Message::assistant("no decisions here", None)];
1277 session.set_task_list(TaskList {
1278 session_id: session.id.clone(),
1279 title: "Tasks".to_string(),
1280 items: vec![TaskItem {
1281 id: "t1".to_string(),
1282 description: "Active task".to_string(),
1283 status: TaskItemStatus::InProgress,
1284 ..TaskItem::default()
1285 }],
1286 created_at: Utc::now(),
1287 updated_at: Utc::now(),
1288 });
1289 let recovery = build_post_compaction_recovery_message(&messages, &session)
1290 .expect("should return recovery message");
1291 assert!(recovery.never_compress);
1292 assert!(recovery.content.contains("[post-compaction-recovery]"));
1293 }
1294
1295 #[test]
1296 fn recovery_message_extracts_file_paths_from_tool_calls() {
1297 let session = Session::new("recovery-files", "model");
1298 let mut write_call = Message::assistant("writing file", None);
1299 write_call.tool_calls = Some(vec![ToolCall {
1300 id: "tc1".to_string(),
1301 tool_type: "function".to_string(),
1302 function: FunctionCall {
1303 name: "Write".to_string(),
1304 arguments: r#"{"file_path":"/src/main.rs","content":"fn main() {}"}"#.to_string(),
1305 },
1306 }]);
1307 let mut edit_call = Message::assistant("editing file", None);
1308 edit_call.tool_calls = Some(vec![ToolCall {
1309 id: "tc2".to_string(),
1310 tool_type: "function".to_string(),
1311 function: FunctionCall {
1312 name: "Edit".to_string(),
1313 arguments: r#"{"file_path":"/lib/utils.rs","old":"x","new":"y"}"#.to_string(),
1314 },
1315 }]);
1316 let messages = vec![write_call, edit_call];
1317
1318 let recovery = build_post_compaction_recovery_message(&messages, &session)
1319 .expect("should return recovery");
1320 assert!(recovery.content.contains("/src/main.rs"));
1321 assert!(recovery.content.contains("/lib/utils.rs"));
1322 assert!(recovery.content.contains("Recently Modified Files"));
1323 }
1324
1325 #[test]
1326 fn recovery_message_includes_active_tasks() {
1327 let mut session = Session::new("recovery-tasks", "model");
1328 session.set_task_list(TaskList {
1329 session_id: session.id.clone(),
1330 title: "Tasks".to_string(),
1331 items: vec![
1332 TaskItem {
1333 id: "t1".to_string(),
1334 description: "Fix auth middleware".to_string(),
1335 status: TaskItemStatus::InProgress,
1336 ..TaskItem::default()
1337 },
1338 TaskItem {
1339 id: "t2".to_string(),
1340 description: "Add tests".to_string(),
1341 status: TaskItemStatus::Pending,
1342 ..TaskItem::default()
1343 },
1344 TaskItem {
1345 id: "t3".to_string(),
1346 description: "Done task".to_string(),
1347 status: TaskItemStatus::Completed,
1348 ..TaskItem::default()
1349 },
1350 ],
1351 created_at: Utc::now(),
1352 updated_at: Utc::now(),
1353 });
1354 let messages = vec![Message::assistant("some work", None)];
1355
1356 let recovery = build_post_compaction_recovery_message(&messages, &session)
1357 .expect("should return recovery");
1358 assert!(recovery.content.contains("Active Tasks"));
1359 assert!(recovery.content.contains("Fix auth middleware"));
1360 assert!(recovery.content.contains("Add tests"));
1361 assert!(!recovery.content.contains("Done task"));
1363 }
1364
1365 #[test]
1366 fn apply_compression_plan_injects_recovery_message() {
1367 let budget = TokenBudget {
1368 max_context_tokens: 1200,
1369 max_output_tokens: 100,
1370 strategy: BudgetStrategy::Hybrid {
1371 window_size: 20,
1372 enable_summarization: true,
1373 },
1374 safety_margin: 0,
1375 compression_trigger_percent: 80,
1376 compression_target_percent: 20,
1377 working_reserve_tokens: 0,
1378 fallback_trigger_percent: 75,
1379 prompt_cache_min_tool_output_chars: 1_200,
1380 prompt_cache_head_chars: 280,
1381 prompt_cache_tail_chars: 180,
1382 prompt_cache_recent_user_turns: 2,
1383 prompt_cache_recent_tool_chains: 2,
1384 max_tool_output_tokens: 0,
1385 };
1386 let mut session = Session::new("recovery-inject", "gpt-4o-mini");
1387 session.token_budget = Some(budget.clone());
1388 session.add_message(Message::system("system"));
1389
1390 let mut write_msg = Message::assistant("writing", None);
1392 write_msg.tool_calls = Some(vec![ToolCall {
1393 id: "tc-w".to_string(),
1394 tool_type: "function".to_string(),
1395 function: FunctionCall {
1396 name: "Write".to_string(),
1397 arguments: r#"{"file_path":"/src/lib.rs","content":"pub fn hello() {}"}"#
1398 .to_string(),
1399 },
1400 }]);
1401 session.add_message(Message::user("Write the file"));
1402 session.add_message(write_msg);
1403
1404 for i in 0..6 {
1406 session.add_message(Message::user(format!(
1407 "U{i}: {}",
1408 "alpha beta gamma delta ".repeat(8)
1409 )));
1410 session.add_message(Message::assistant(
1411 format!("A{i}: {}", "analysis plan ".repeat(8)),
1412 None,
1413 ));
1414 }
1415
1416 let plan = build_forced_compression_plan_with_summary(
1417 &session,
1418 "gpt-4o-mini",
1419 Some(&budget),
1420 "summary text".to_string(),
1421 CompressionTriggerType::Auto,
1422 )
1423 .expect("plan should build");
1424
1425 assert!(plan.compressed_message_ids.len() > 0);
1426
1427 let compressed_count = apply_compression_plan(&mut session, plan);
1428 assert!(compressed_count > 0);
1429
1430 let has_recovery = session.messages.iter().any(|m| {
1432 m.never_compress
1433 && m.content.contains("[post-compaction-recovery]")
1434 && m.content.contains("/src/lib.rs")
1435 });
1436 assert!(
1437 has_recovery,
1438 "session should contain a post-compaction recovery message with the file path"
1439 );
1440 }
1441
1442 #[test]
1443 fn summary_quality_full_coverage_when_all_files_mentioned() {
1444 let messages = vec![{
1445 let mut m = Message::assistant("writing", None);
1446 m.tool_calls = Some(vec![ToolCall {
1447 id: "tc1".to_string(),
1448 tool_type: "function".to_string(),
1449 function: FunctionCall {
1450 name: "Write".to_string(),
1451 arguments: r#"{"file_path":"/src/main.rs","content":"fn main() {}"}"#
1452 .to_string(),
1453 },
1454 }]);
1455 m
1456 }];
1457 let summary = "Modified /src/main.rs to add main function";
1458 let quality = validate_summary_quality(summary, &messages);
1459 assert!(
1460 quality.file_coverage >= 0.99,
1461 "file_coverage should be ~1.0, got {:.2}",
1462 quality.file_coverage
1463 );
1464 }
1465
1466 #[test]
1467 fn summary_quality_zero_coverage_when_no_files_mentioned() {
1468 let messages = vec![{
1469 let mut m = Message::assistant("writing", None);
1470 m.tool_calls = Some(vec![ToolCall {
1471 id: "tc1".to_string(),
1472 tool_type: "function".to_string(),
1473 function: FunctionCall {
1474 name: "Write".to_string(),
1475 arguments: r#"{"file_path":"/src/main.rs","content":"fn main() {}"}"#
1476 .to_string(),
1477 },
1478 }]);
1479 m
1480 }];
1481 let summary = "Summary that mentions nothing about files";
1482 let quality = validate_summary_quality(summary, &messages);
1483 assert!(
1484 quality.file_coverage < 0.01,
1485 "file_coverage should be ~0.0, got {:.2}",
1486 quality.file_coverage
1487 );
1488 }
1489
1490 #[test]
1491 fn summary_quality_handles_empty_messages() {
1492 let quality = validate_summary_quality("some summary", &[]);
1493 assert_eq!(quality.file_coverage, 1.0);
1494 assert_eq!(quality.decision_coverage, 1.0);
1495 }
1496}