// harn_vm/orchestration/compaction.rs

1//! Auto-compaction — transcript size management strategies.
2
3use std::collections::BTreeMap;
4use std::rc::Rc;
5
6use crate::llm::{vm_call_llm_full, vm_value_to_json};
7use crate::value::{VmError, VmValue};
8
/// Strategy used to shrink an oversized transcript during auto-compaction.
///
/// Parsed from user-facing strings by `parse_compact_strategy` and rendered
/// back by `compact_strategy_name`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum CompactStrategy {
    /// Summarize archived messages with an LLM call.
    Llm,
    /// Abbreviate archived messages deterministically (no LLM involved).
    Truncate,
    /// Delegate summarization to a user-supplied Harn closure.
    Custom,
    /// Mask long tool results while keeping assistant prose verbatim.
    ObservationMask,
}
16
17pub fn parse_compact_strategy(value: &str) -> Result<CompactStrategy, VmError> {
18    match value {
19        "llm" => Ok(CompactStrategy::Llm),
20        "truncate" => Ok(CompactStrategy::Truncate),
21        "custom" => Ok(CompactStrategy::Custom),
22        "observation_mask" => Ok(CompactStrategy::ObservationMask),
23        other => Err(VmError::Runtime(format!(
24            "unknown compact_strategy '{other}' (expected 'llm', 'truncate', 'custom', or 'observation_mask')"
25        ))),
26    }
27}
28
29pub fn compact_strategy_name(strategy: &CompactStrategy) -> &'static str {
30    match strategy {
31        CompactStrategy::Llm => "llm",
32        CompactStrategy::Truncate => "truncate",
33        CompactStrategy::Custom => "custom",
34        CompactStrategy::ObservationMask => "observation_mask",
35    }
36}
37
/// Configuration for automatic transcript compaction in agent loops.
///
/// Two-tier compaction:
///   Tier 1 (`token_threshold` / `compact_strategy`): lightweight, deterministic
///     observation masking that fires early. Masks verbose tool results while
///     preserving assistant prose and error output.
///   Tier 2 (`hard_limit_tokens` / `hard_limit_strategy`): aggressive LLM-powered
///     summarization that fires when tier-1 alone isn't enough, typically as the
///     transcript approaches the model's actual context window.
///
/// Typically built with struct-update syntax over `Default`, e.g.
/// `AutoCompactConfig { keep_last: 2, ..Default::default() }`.
#[derive(Clone, Debug)]
pub struct AutoCompactConfig {
    /// Number of earliest messages to keep verbatim before the compacted
    /// summary. The system prompt is not part of this list and is always
    /// preserved separately by the caller.
    pub keep_first: usize,
    /// Tier-1 threshold: estimated tokens before lightweight compaction.
    /// A value of 0 disables the gate (compaction is always considered).
    pub token_threshold: usize,
    /// Maximum character length for a single tool result before microcompaction.
    pub tool_output_max_chars: usize,
    /// Number of recent messages to keep during compaction.
    pub keep_last: usize,
    /// Tier-1 strategy (default: ObservationMask).
    pub compact_strategy: CompactStrategy,
    /// Tier-2 threshold: fires when tier-1 result still exceeds this.
    /// Typically set to ~75% of the model's actual context window.
    /// When `None`, tier-2 is disabled.
    pub hard_limit_tokens: Option<usize>,
    /// Tier-2 strategy (default: Llm).
    pub hard_limit_strategy: CompactStrategy,
    /// Optional Harn callback used when a strategy is `custom`.
    pub custom_compactor: Option<VmValue>,
    /// Optional callback for domain-specific per-message masking during
    /// observation mask compaction. Called with a list of archived messages,
    /// returns a list of `Option<String>` — `Some(masked)` to override the
    /// default mask for that message, `None` to use the default.
    /// This lets the host (e.g. an IDE or cloud runner) inject AST outlines,
    /// file summaries, etc. without putting language-specific logic in Harn.
    pub mask_callback: Option<VmValue>,
    /// Optional callback for per-tool-result compression. Called with
    /// `{tool_name, output, max_chars}` and returns compressed output string.
    /// When set, used INSTEAD of the built-in `microcompact_tool_output`.
    /// This allows the pipeline to use LLM-based compression rather than
    /// keyword heuristics.
    pub compress_callback: Option<VmValue>,
    /// Optional prompt-template asset path used when LLM compaction is
    /// selected. The rendered template becomes the user message sent to
    /// the summarizer.
    pub summarize_prompt: Option<String>,
    /// User-facing policy label for replay and observability. This can be
    /// broader than the engine strategy, e.g. `hybrid` lowers to LLM
    /// summarization plus truncate fallback.
    pub policy_strategy: String,
}
91
impl Default for AutoCompactConfig {
    /// Baseline two-tier configuration: tier-1 observation masking at ~48k
    /// estimated tokens; tier-2 disabled until `hard_limit_tokens` is set.
    fn default() -> Self {
        Self {
            keep_first: 0,
            // ~48k estimated tokens (chars / 4 heuristic) before tier-1 fires.
            token_threshold: 48_000,
            tool_output_max_chars: 16_000,
            keep_last: 12,
            compact_strategy: CompactStrategy::ObservationMask,
            // Tier-2 off by default; callers opt in with a real context limit.
            hard_limit_tokens: None,
            hard_limit_strategy: CompactStrategy::Llm,
            custom_compactor: None,
            mask_callback: None,
            compress_callback: None,
            summarize_prompt: None,
            // Keep the observability label in sync with the engine strategy.
            policy_strategy: compact_strategy_name(&CompactStrategy::ObservationMask).to_string(),
        }
    }
}
110
111/// Estimate token count from a list of JSON messages (chars / 4 heuristic).
112pub fn estimate_message_tokens(messages: &[serde_json::Value]) -> usize {
113    messages
114        .iter()
115        .map(|m| {
116            m.get("content")
117                .and_then(|c| c.as_str())
118                .map(|s| s.len())
119                .unwrap_or(0)
120        })
121        .sum::<usize>()
122        / 4
123}
124
125fn is_reasoning_or_tool_turn_message(message: &serde_json::Value) -> bool {
126    let role = message
127        .get("role")
128        .and_then(|value| value.as_str())
129        .unwrap_or_default();
130    role == "tool"
131        || message.get("tool_calls").is_some()
132        || message
133            .get("reasoning")
134            .map(|value| !value.is_null())
135            .unwrap_or(false)
136}
137
138fn find_prev_user_boundary(messages: &[serde_json::Value], start: usize) -> Option<usize> {
139    (0..=start)
140        .rev()
141        .find(|idx| messages[*idx].get("role").and_then(|value| value.as_str()) == Some("user"))
142}
143
144/// Microcompact a tool result: if it exceeds `max_chars`, keep the first and
145/// last portions with a snip marker in between.
146pub fn microcompact_tool_output(output: &str, max_chars: usize) -> String {
147    if output.len() <= max_chars || max_chars < 200 {
148        return output.to_string();
149    }
150    let diagnostic_lines = output
151        .lines()
152        .filter(|line| {
153            let trimmed = line.trim();
154            let lower = trimmed.to_lowercase();
155            let has_file_line = {
156                let bytes = trimmed.as_bytes();
157                let mut i = 0;
158                let mut found_colon = false;
159                while i < bytes.len() {
160                    if bytes[i] == b':' {
161                        found_colon = true;
162                        break;
163                    }
164                    i += 1;
165                }
166                found_colon && i + 1 < bytes.len() && bytes[i + 1].is_ascii_digit()
167            };
168            let has_strong_keyword =
169                trimmed.contains("FAIL") || trimmed.contains("panic") || trimmed.contains("Panic");
170            let has_weak_keyword = trimmed.contains("error")
171                || trimmed.contains("undefined")
172                || trimmed.contains("expected")
173                || trimmed.contains("got")
174                || lower.contains("cannot find")
175                || lower.contains("not found")
176                || lower.contains("no such")
177                || lower.contains("unresolved")
178                || lower.contains("missing")
179                || lower.contains("declared but not used")
180                || lower.contains("unused")
181                || lower.contains("mismatch");
182            let positional = lower.contains(" error ")
183                || lower.starts_with("error:")
184                || lower.starts_with("warning:")
185                || lower.starts_with("note:")
186                || lower.contains("panic:");
187            has_strong_keyword || (has_file_line && has_weak_keyword) || positional
188        })
189        .take(32)
190        .collect::<Vec<_>>();
191    if !diagnostic_lines.is_empty() {
192        let diagnostics = diagnostic_lines.join("\n");
193        let budget = max_chars.saturating_sub(diagnostics.len() + 64);
194        let keep = budget / 2;
195        if keep >= 80 && output.len() > keep * 2 {
196            let head = snap_to_line_end(output, keep);
197            let tail = snap_to_line_start(output, output.len().saturating_sub(keep));
198            return format!(
199                "{head}\n\n[diagnostic lines preserved]\n{diagnostics}\n\n[... output compacted ...]\n\n{tail}"
200            );
201        }
202    }
203    let keep = max_chars / 2;
204    let head = snap_to_line_end(output, keep);
205    let tail = snap_to_line_start(output, output.len().saturating_sub(keep));
206    let snipped = output.len().saturating_sub(head.len() + tail.len());
207    format!("{head}\n\n[... {snipped} characters snipped ...]\n\n{tail}")
208}
209
/// Snap a byte offset to the nearest preceding line boundary (end of a complete line).
/// Returns the substring from the start up to and including the last complete line
/// that fits within `max_bytes`. Never cuts mid-line.
///
/// Uses a stable `str::is_char_boundary` walk instead of the nightly-only
/// `str::floor_char_boundary`, so the code builds on stable toolchains with
/// identical results.
fn snap_to_line_end(s: &str, max_bytes: usize) -> &str {
    if max_bytes >= s.len() {
        return s;
    }
    // Largest char boundary <= max_bytes (what `floor_char_boundary` does);
    // index 0 is always a boundary, so this terminates.
    let mut search_end = max_bytes;
    while !s.is_char_boundary(search_end) {
        search_end -= 1;
    }
    match s[..search_end].rfind('\n') {
        Some(pos) => &s[..pos + 1],
        None => &s[..search_end], // single long line — fall back to char boundary
    }
}
223
/// Snap a byte offset to the nearest following line boundary (start of a complete line).
/// Returns the substring from the first complete line at or after `start_byte`.
/// Never cuts mid-line.
///
/// Uses a stable `str::is_char_boundary` walk instead of the nightly-only
/// `str::ceil_char_boundary`; unlike the unstable API, `start_byte` values
/// past the end of `s` return `""` instead of panicking.
fn snap_to_line_start(s: &str, start_byte: usize) -> &str {
    if start_byte == 0 {
        return s;
    }
    // Smallest char boundary >= start_byte (what `ceil_char_boundary` does),
    // clamped to the string length; `s.len()` is always a boundary, so this
    // terminates.
    let mut search_start = start_byte.min(s.len());
    while !s.is_char_boundary(search_start) {
        search_start += 1;
    }
    if search_start >= s.len() {
        return "";
    }
    match s[search_start..].find('\n') {
        Some(pos) => {
            let line_start = search_start + pos + 1;
            if line_start < s.len() {
                &s[line_start..]
            } else {
                &s[search_start..]
            }
        }
        None => &s[search_start..], // already at start of last line
    }
}
247
248fn format_compaction_messages(messages: &[serde_json::Value]) -> String {
249    messages
250        .iter()
251        .map(|msg| {
252            let role = msg
253                .get("role")
254                .and_then(|v| v.as_str())
255                .unwrap_or("user")
256                .to_uppercase();
257            let content = msg
258                .get("content")
259                .and_then(|v| v.as_str())
260                .unwrap_or_default();
261            format!("{role}: {content}")
262        })
263        .collect::<Vec<_>>()
264        .join("\n")
265}
266
/// Deterministic truncate-style summary of archived messages with the
/// standard (non-fallback) header. Thin wrapper over
/// `truncate_compaction_summary_with_context` with `is_llm_fallback = false`.
fn truncate_compaction_summary(
    old_messages: &[serde_json::Value],
    archived_count: usize,
) -> String {
    truncate_compaction_summary_with_context(old_messages, archived_count, false)
}
273
274fn truncate_compaction_summary_with_context(
275    old_messages: &[serde_json::Value],
276    archived_count: usize,
277    is_llm_fallback: bool,
278) -> String {
279    let per_msg_limit = 500_usize;
280    let summary_parts: Vec<String> = old_messages
281        .iter()
282        .filter_map(|m| {
283            let role = m.get("role")?.as_str()?;
284            let content = m.get("content")?.as_str()?;
285            if content.is_empty() {
286                return None;
287            }
288            let truncated = if content.len() > per_msg_limit {
289                format!(
290                    "{}... [truncated from {} chars]",
291                    &content[..content.floor_char_boundary(per_msg_limit)],
292                    content.len()
293                )
294            } else {
295                content.to_string()
296            };
297            Some(format!("[{role}] {truncated}"))
298        })
299        .take(15)
300        .collect();
301    let header = if is_llm_fallback {
302        format!(
303            "[auto-compact fallback: LLM summarizer returned empty; {archived_count} older messages abbreviated to ~{per_msg_limit} chars each]"
304        )
305    } else {
306        format!("[auto-compacted {archived_count} older messages via truncate strategy]")
307    };
308    format!(
309        "{header}\n{}{}",
310        summary_parts.join("\n"),
311        if archived_count > 15 {
312            format!("\n... and {} more", archived_count - 15)
313        } else {
314            String::new()
315        }
316    )
317}
318
319fn compact_summary_text_from_value(value: &VmValue) -> Result<String, VmError> {
320    if let Some(map) = value.as_dict() {
321        if let Some(summary) = map.get("summary").or_else(|| map.get("text")) {
322            return Ok(summary.display());
323        }
324    }
325    match value {
326        VmValue::String(text) => Ok(text.to_string()),
327        VmValue::Nil => Ok(String::new()),
328        _ => serde_json::to_string_pretty(&vm_value_to_json(value))
329            .map_err(|e| VmError::Runtime(format!("custom compactor encode error: {e}"))),
330    }
331}
332
333async fn llm_compaction_summary(
334    old_messages: &[serde_json::Value],
335    archived_count: usize,
336    llm_opts: &crate::llm::api::LlmCallOptions,
337    summarize_prompt: Option<&str>,
338) -> Result<String, VmError> {
339    let mut compact_opts = llm_opts.clone();
340    let formatted = format_compaction_messages(old_messages);
341    compact_opts.system = None;
342    compact_opts.transcript_summary = None;
343    compact_opts.native_tools = None;
344    compact_opts.tool_choice = None;
345    compact_opts.output_format = crate::llm::api::OutputFormat::Text;
346    compact_opts.response_format = None;
347    compact_opts.json_schema = None;
348    compact_opts.output_schema = None;
349    let prompt = render_llm_compaction_prompt(summarize_prompt, &formatted, archived_count)?;
350    compact_opts.messages = vec![serde_json::json!({
351        "role": "user",
352        "content": prompt,
353    })];
354    let result = vm_call_llm_full(&compact_opts).await?;
355    let summary = result.text.trim();
356    if summary.is_empty() {
357        Ok(truncate_compaction_summary_with_context(
358            old_messages,
359            archived_count,
360            true,
361        ))
362    } else {
363        Ok(format!(
364            "[auto-compacted {archived_count} older messages]\n{summary}"
365        ))
366    }
367}
368
369fn render_llm_compaction_prompt(
370    summarize_prompt: Option<&str>,
371    formatted: &str,
372    archived_count: usize,
373) -> Result<String, VmError> {
374    let mut bindings = BTreeMap::new();
375    bindings.insert(
376        "formatted_messages".to_string(),
377        VmValue::String(Rc::from(formatted.to_string())),
378    );
379    bindings.insert(
380        "archived_count".to_string(),
381        VmValue::Int(archived_count as i64),
382    );
383    let Some(path) = summarize_prompt.filter(|path| !path.trim().is_empty()) else {
384        return crate::stdlib::template::render_stdlib_prompt_asset(
385            "orchestration/prompts/compaction_summary.harn.prompt",
386            Some(&bindings),
387        );
388    };
389
390    let asset = crate::stdlib::template::TemplateAsset::render_target(path)
391        .map_err(|error| VmError::Runtime(format!("compaction summarize_prompt: {error}")))?;
392    crate::stdlib::template::render_asset_result(&asset, Some(&bindings)).map_err(VmError::from)
393}
394
395async fn custom_compaction_summary(
396    old_messages: &[serde_json::Value],
397    archived_count: usize,
398    callback: &VmValue,
399) -> Result<String, VmError> {
400    let Some(VmValue::Closure(closure)) = Some(callback.clone()) else {
401        return Err(VmError::Runtime(
402            "compact_callback must be a closure when compact_strategy is 'custom'".to_string(),
403        ));
404    };
405    let mut vm = crate::vm::clone_async_builtin_child_vm().ok_or_else(|| {
406        VmError::Runtime(
407            "custom transcript compaction requires an async builtin VM context".to_string(),
408        )
409    })?;
410    let messages_vm = VmValue::List(Rc::new(
411        old_messages
412            .iter()
413            .map(crate::stdlib::json_to_vm_value)
414            .collect(),
415    ));
416    let result = vm.call_closure_pub(&closure, &[messages_vm]).await;
417    let summary = compact_summary_text_from_value(&result?)?;
418    if summary.trim().is_empty() {
419        Ok(truncate_compaction_summary(old_messages, archived_count))
420    } else {
421        Ok(format!(
422            "[auto-compacted {archived_count} older messages]\n{summary}"
423        ))
424    }
425}
426
/// Check whether a tool-result string should be preserved verbatim during
/// observation masking. Uses content length as the primary heuristic:
/// short results (< 500 chars) are kept since they're typically error messages,
/// status lines, or concise answers that are cheap to retain and risky to mask.
/// Long results are masked to save context budget.
fn content_should_preserve(content: &str) -> bool {
    // Byte length, not char count — matches the budget accounting elsewhere.
    const PRESERVE_MAX_BYTES: usize = 500;
    content.len() < PRESERVE_MAX_BYTES
}
435
/// Default per-message masking for tool results.
///
/// Messages of up to three lines pass through verbatim (prefixed with the
/// role); longer ones are reduced to a preview of the first line (at most
/// 120 bytes, snapped to a char boundary) plus a line/byte count of what
/// was masked.
fn default_mask_tool_result(role: &str, content: &str) -> String {
    let first_line = content.lines().next().unwrap_or(content);
    let line_count = content.lines().count();
    // NOTE: byte length, reported as "chars" in the mask text (pre-existing
    // label, kept for output stability).
    let char_count = content.len();
    if line_count <= 3 {
        format!("[{role}] {content}")
    } else {
        // Snap the preview cut to a char boundary; the previous direct slice
        // `&first_line[..len.min(120)]` panicked when byte 120 fell inside a
        // multibyte character.
        let mut cut = first_line.len().min(120);
        while !first_line.is_char_boundary(cut) {
            cut -= 1;
        }
        let preview = &first_line[..cut];
        format!("[{role}] {preview}... [{line_count} lines, {char_count} chars masked]")
    }
}
448
/// Deterministic observation-mask compaction.
///
/// Test-build-only convenience wrapper around
/// `observation_mask_compaction_with_callback` with no custom mask results.
#[cfg(test)]
pub(crate) fn observation_mask_compaction(
    old_messages: &[serde_json::Value],
    archived_count: usize,
) -> String {
    observation_mask_compaction_with_callback(old_messages, archived_count, None)
}
457
458fn observation_mask_compaction_with_callback(
459    old_messages: &[serde_json::Value],
460    archived_count: usize,
461    mask_results: Option<&[Option<String>]>,
462) -> String {
463    let mut parts = Vec::new();
464    parts.push(format!(
465        "[auto-compacted {archived_count} older messages via observation masking]"
466    ));
467    for (idx, msg) in old_messages.iter().enumerate() {
468        let role = msg.get("role").and_then(|v| v.as_str()).unwrap_or("user");
469        let content = msg
470            .get("content")
471            .and_then(|v| v.as_str())
472            .unwrap_or_default();
473        if content.is_empty() {
474            continue;
475        }
476        if role == "assistant" {
477            parts.push(format!("[assistant] {content}"));
478            continue;
479        }
480        if content_should_preserve(content) {
481            parts.push(format!("[{role}] {content}"));
482        } else if let Some(Some(custom)) = mask_results.and_then(|r| r.get(idx)) {
483            parts.push(custom.clone());
484        } else {
485            parts.push(default_mask_tool_result(role, content));
486        }
487    }
488    parts.join("\n")
489}
490
491/// Invoke the mask_callback to get per-message custom masks.
492async fn invoke_mask_callback(
493    callback: &VmValue,
494    old_messages: &[serde_json::Value],
495) -> Result<Vec<Option<String>>, VmError> {
496    let VmValue::Closure(closure) = callback.clone() else {
497        return Err(VmError::Runtime(
498            "mask_callback must be a closure".to_string(),
499        ));
500    };
501    let mut vm = crate::vm::clone_async_builtin_child_vm().ok_or_else(|| {
502        VmError::Runtime("mask_callback requires an async builtin VM context".to_string())
503    })?;
504    let messages_vm = VmValue::List(Rc::new(
505        old_messages
506            .iter()
507            .map(crate::stdlib::json_to_vm_value)
508            .collect(),
509    ));
510    let result = vm.call_closure_pub(&closure, &[messages_vm]).await?;
511    let list = match result {
512        VmValue::List(items) => items,
513        _ => return Ok(vec![None; old_messages.len()]),
514    };
515    Ok(list
516        .iter()
517        .map(|v| match v {
518            VmValue::String(s) => Some(s.to_string()),
519            VmValue::Nil => None,
520            _ => None,
521        })
522        .collect())
523}
524
525/// Apply a single compaction strategy to a list of archived messages.
526async fn apply_compaction_strategy(
527    strategy: &CompactStrategy,
528    old_messages: &[serde_json::Value],
529    archived_count: usize,
530    llm_opts: Option<&crate::llm::api::LlmCallOptions>,
531    custom_compactor: Option<&VmValue>,
532    mask_callback: Option<&VmValue>,
533    summarize_prompt: Option<&str>,
534) -> Result<String, VmError> {
535    match strategy {
536        CompactStrategy::Truncate => Ok(truncate_compaction_summary(old_messages, archived_count)),
537        CompactStrategy::Llm => {
538            llm_compaction_summary(
539                old_messages,
540                archived_count,
541                llm_opts.ok_or_else(|| {
542                    VmError::Runtime(
543                        "LLM transcript compaction requires active LLM call options".to_string(),
544                    )
545                })?,
546                summarize_prompt,
547            )
548            .await
549        }
550        CompactStrategy::Custom => {
551            custom_compaction_summary(
552                old_messages,
553                archived_count,
554                custom_compactor.ok_or_else(|| {
555                    VmError::Runtime(
556                        "compact_callback is required when compact_strategy is 'custom'"
557                            .to_string(),
558                    )
559                })?,
560            )
561            .await
562        }
563        CompactStrategy::ObservationMask => {
564            let mask_results = if let Some(cb) = mask_callback {
565                Some(invoke_mask_callback(cb, old_messages).await?)
566            } else {
567                None
568            };
569            Ok(observation_mask_compaction_with_callback(
570                old_messages,
571                archived_count,
572                mask_results.as_deref(),
573            ))
574        }
575    }
576}
577
/// Auto-compact a message list in place using two-tier compaction.
///
/// Returns `Ok(None)` when nothing was compacted (under the token threshold,
/// too few messages, or no archivable range after boundary snapping).
/// Otherwise drains a middle slice of `messages`, replaces it with a single
/// user-role summary message inserted at `keep_first`, and returns
/// `Ok(Some(summary))`.
pub(crate) async fn auto_compact_messages(
    messages: &mut Vec<serde_json::Value>,
    config: &AutoCompactConfig,
    llm_opts: Option<&crate::llm::api::LlmCallOptions>,
) -> Result<Option<String>, VmError> {
    // Tier-1 gate: below a non-zero threshold, do nothing. A threshold of 0
    // skips the gate entirely (compaction is always considered).
    if config.token_threshold > 0 && estimate_message_tokens(messages) <= config.token_threshold {
        return Ok(None);
    }
    // Nothing to archive if the keep-first/keep-last windows already cover
    // the whole transcript.
    if messages.len() <= config.keep_first.saturating_add(config.keep_last) {
        return Ok(None);
    }
    let compact_start = config.keep_first.min(messages.len());
    let original_split = messages.len().saturating_sub(config.keep_last);
    let mut split_at = original_split;
    // Snap back to a user-role boundary so the kept suffix begins at a clean
    // turn. OpenAI-compatible APIs reject tool results orphaned from their
    // assistant request, so splitting mid-turn corrupts the transcript.
    while split_at > compact_start
        && split_at < messages.len()
        && messages[split_at]
            .get("role")
            .and_then(|r| r.as_str())
            .is_none_or(|r| r != "user")
    {
        split_at -= 1;
    }
    // Fall back to the naive split (e.g. tool-heavy transcripts with the sole
    // user message at index 0) rather than skipping compaction entirely.
    if split_at == compact_start {
        split_at = original_split;
    }
    // If the kept suffix starts inside a reasoning/tool-call turn, pull the
    // split back to the user message that opened that turn so the suffix
    // stays a self-contained exchange.
    if let Some(volatile_start) = messages[split_at..]
        .iter()
        .position(is_reasoning_or_tool_turn_message)
        .map(|offset| split_at + offset)
    {
        if let Some(boundary) = volatile_start
            .checked_sub(1)
            .and_then(|idx| find_prev_user_boundary(messages, idx))
            .filter(|boundary| *boundary > compact_start)
        {
            split_at = boundary;
        }
    }
    // Boundary snapping may have consumed the whole candidate range.
    if split_at <= compact_start {
        return Ok(None);
    }
    let old_messages: Vec<_> = messages.drain(compact_start..split_at).collect();
    let archived_count = old_messages.len();

    // Tier 1: apply the configured strategy to the archived slice.
    let mut summary = apply_compaction_strategy(
        &config.compact_strategy,
        &old_messages,
        archived_count,
        llm_opts,
        config.custom_compactor.as_ref(),
        config.mask_callback.as_ref(),
        config.summarize_prompt.as_deref(),
    )
    .await?;

    // Tier 2: if the tier-1 summary plus the kept messages still exceed the
    // hard limit, re-compact the summary itself with the hard-limit strategy.
    // No mask callback here — the input is the tier-1 summary text, not raw
    // tool results.
    if let Some(hard_limit) = config.hard_limit_tokens {
        let summary_msg = serde_json::json!({"role": "user", "content": &summary});
        let mut estimate_msgs = vec![summary_msg];
        estimate_msgs.extend_from_slice(messages.as_slice());
        let estimated = estimate_message_tokens(&estimate_msgs);
        if estimated > hard_limit {
            let tier1_as_messages = vec![serde_json::json!({
                "role": "user",
                "content": summary,
            })];
            summary = apply_compaction_strategy(
                &config.hard_limit_strategy,
                &tier1_as_messages,
                archived_count,
                llm_opts,
                config.custom_compactor.as_ref(),
                None,
                config.summarize_prompt.as_deref(),
            )
            .await?;
        }
    }

    // Replace the drained slice with a single user-role summary message.
    messages.insert(
        compact_start,
        serde_json::json!({
            "role": "user",
            "content": summary,
        }),
    );
    Ok(Some(summary))
}
672
#[cfg(test)]
mod tests {
    use super::*;

    // Outputs within the limit must pass through byte-identical.
    #[test]
    fn microcompact_short_output_unchanged() {
        let output = "line1\nline2\nline3\n";
        assert_eq!(microcompact_tool_output(output, 1000), output);
    }

    // The head of a snipped output must consist only of complete lines —
    // verifies the snap_to_line_end behavior through the public function.
    #[test]
    fn microcompact_snaps_to_line_boundaries() {
        let lines: Vec<String> = (0..20)
            .map(|i| format!("line {:02} content here", i))
            .collect();
        let output = lines.join("\n");
        let result = microcompact_tool_output(&output, 200);
        assert!(result.contains("[... "), "should have snip marker");
        let parts: Vec<&str> = result.split("\n\n[... ").collect();
        assert!(parts.len() >= 2, "should split at marker");
        let head = parts[0];
        for line in head.lines() {
            assert!(
                line.starts_with("line "),
                "head line should be complete: {line}"
            );
        }
    }

    // A compiler-style diagnostic buried mid-output must survive compaction
    // in the dedicated "[diagnostic lines preserved]" section.
    #[test]
    fn microcompact_preserves_diagnostic_lines_with_line_boundaries() {
        let mut lines = Vec::new();
        for i in 0..50 {
            lines.push(format!("verbose output line {i}"));
        }
        lines.push("src/main.rs:42: error: cannot find value".to_string());
        for i in 50..100 {
            lines.push(format!("verbose output line {i}"));
        }
        let output = lines.join("\n");
        let result = microcompact_tool_output(&output, 600);
        assert!(result.contains("cannot find value"), "diagnostic preserved");
        assert!(
            result.contains("[diagnostic lines preserved]"),
            "has diagnostic marker"
        );
    }

    // Head slice must stop at a newline, never mid-line.
    #[test]
    fn snap_to_line_end_finds_newline() {
        let s = "line1\nline2\nline3\nline4\n";
        let head = snap_to_line_end(s, 12);
        assert!(head.ends_with('\n'), "should end at newline");
        assert!(head.contains("line1"));
    }

    // Tail slice must begin at a line boundary, never mid-line.
    #[test]
    fn snap_to_line_start_finds_newline() {
        let s = "line1\nline2\nline3\nline4\n";
        let tail = snap_to_line_start(s, 12);
        assert!(
            tail.starts_with("line"),
            "should start at line boundary: {tail}"
        );
    }

    // The kept suffix must contain the complete user → assistant(tool_calls)
    // → tool exchange even though keep_last=2 would naively split inside it.
    // llm_opts is None, which works because the default tier-1 strategy
    // (observation masking) needs no LLM.
    #[test]
    fn auto_compact_preserves_reasoning_tool_suffix() {
        let mut messages = vec![
            serde_json::json!({"role": "user", "content": "old task"}),
            serde_json::json!({"role": "assistant", "content": "old reply"}),
            serde_json::json!({"role": "user", "content": "new task"}),
            serde_json::json!({
                "role": "assistant",
                "content": "",
                "reasoning": "think first",
                "tool_calls": [{
                    "id": "call_1",
                    "type": "function",
                    "function": {"name": "read", "arguments": "{\"path\":\"foo.rs\"}"}
                }],
            }),
            serde_json::json!({"role": "tool", "tool_call_id": "call_1", "content": "file"}),
        ];
        // token_threshold: 1 forces compaction regardless of transcript size.
        let config = AutoCompactConfig {
            token_threshold: 1,
            keep_last: 2,
            ..Default::default()
        };

        let runtime = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("runtime");
        let summary = runtime
            .block_on(auto_compact_messages(&mut messages, &config, None))
            .expect("compaction succeeds");

        assert!(summary.is_some());
        // Index 0 is the inserted summary (keep_first = 0 → compact_start 0).
        assert_eq!(messages[1]["role"], "user");
        assert_eq!(messages[2]["role"], "assistant");
        assert_eq!(messages[2]["tool_calls"][0]["id"], "call_1");
        assert_eq!(messages[3]["role"], "tool");
        assert_eq!(messages[3]["tool_call_id"], "call_1");
    }
}