Skip to main content

synaps_cli/core/
compaction.rs

1//! Conversation compaction — turn a long message history into a structured summary.
2
3use serde_json::{json, Value};
4
/// System prompt used for the compaction API call.
///
/// Instructs the model to act purely as a summarizer: it must not continue
/// the conversation or answer any questions found inside it — only emit the
/// structured summary requested by the user-turn prompt
/// (see `SUMMARIZATION_PROMPT` / `UPDATE_SUMMARIZATION_PROMPT` below).
pub const COMPACTION_SYSTEM_PROMPT: &str = "You are a context summarization assistant. Your task is to read a conversation between a user and an AI coding assistant, then produce a structured summary following the exact format specified.\n\nDo NOT continue the conversation. Do NOT respond to any questions in the conversation. ONLY output the structured summary.";
8
9use crate::runtime::Runtime;
10use crate::error::Result;
11
12
/// User-turn prompt for a *first* compaction (no previous summary present).
///
/// Appended after the serialized `<conversation>` transcript; asks the model
/// for a checkpoint summary in a fixed markdown section layout that
/// `UPDATE_SUMMARIZATION_PROMPT` later assumes when compacting iteratively.
const SUMMARIZATION_PROMPT: &str = r#"The messages above are a conversation to summarize. Create a structured context checkpoint summary that another LLM will use to continue the work.

Use this EXACT format:

## Goal
[What is the user trying to accomplish? Can be multiple items if the session covers different tasks.]

## Constraints & Preferences
- [Any constraints, preferences, or requirements mentioned by user]
- [Or "(none)" if none were mentioned]

## Progress
### Done
- [x] [Completed tasks/changes]

### In Progress
- [ ] [Current work]

### Blocked
- [Issues preventing progress, if any]

## Key Decisions
- **[Decision]**: [Brief rationale]

## Next Steps
1. [Ordered list of what should happen next]

## Critical Context
- [Any data, examples, or references needed to continue]
- [Or "(none)" if not applicable]

Keep each section concise. Preserve exact file paths, function names, and error messages."#;
45
/// User-turn prompt for an *iterative* compaction (a `<context-summary>` from
/// a previous compaction is already present in the history).
///
/// Uses the same section layout as `SUMMARIZATION_PROMPT`, but tells the
/// model to merge the new messages into the existing summary rather than
/// start from scratch.
const UPDATE_SUMMARIZATION_PROMPT: &str = r#"The messages above are NEW conversation messages to incorporate into the existing summary provided earlier in the conversation.

Update the existing structured summary with new information. RULES:
- PRESERVE all existing information from the previous summary
- ADD new progress, decisions, and context from the new messages
- UPDATE the Progress section: move items from "In Progress" to "Done" when completed
- UPDATE "Next Steps" based on what was accomplished
- PRESERVE exact file paths, function names, and error messages
- If something is no longer relevant, you may remove it

Use this EXACT format:

## Goal
[Preserve existing goals, add new ones if the task expanded]

## Constraints & Preferences
- [Preserve existing, add new ones discovered]

## Progress
### Done
- [x] [Include previously done items AND newly completed items]

### In Progress
- [ ] [Current work - update based on progress]

### Blocked
- [Current blockers - remove if resolved]

## Key Decisions
- **[Decision]**: [Brief rationale] (preserve all previous, add new)

## Next Steps
1. [Update based on current state]

## Critical Context
- [Preserve important context, add new if needed]

Keep each section concise. Preserve exact file paths, function names, and error messages."#;
84
/// Tracks which file paths the assistant touched via tool calls during the
/// conversation, so the compaction prompt can list read-only vs. modified
/// files separately.
#[derive(Default)]
struct FileOps {
    // Paths passed to the `read` tool.
    read: std::collections::HashSet<String>,
    // Paths passed to the `write` tool.
    written: std::collections::HashSet<String>,
    // Paths passed to the `edit` tool.
    edited: std::collections::HashSet<String>,
}

impl FileOps {
    /// Create an empty tracker (no file operations recorded yet).
    ///
    /// Delegates to the derived `Default` instead of hand-constructing three
    /// empty sets.
    fn new() -> Self {
        Self::default()
    }
}
100
101/// Serialize the in-memory API message history into a readable transcript and
102/// ask the LLM to produce a structured summary. Called by `/compact`.
103pub async fn compact_conversation(
104    api_messages: &[Value],
105    runtime: &Runtime,
106    custom_instructions: Option<&str>,
107) -> Result<String> {
108    let mut parts: Vec<String> = Vec::new();
109    let mut file_ops = FileOps::new();
110
111    for msg in api_messages {
112        match msg["role"].as_str() {
113            Some("user") => {
114                if let Some(content) = msg["content"].as_str() {
115                    if content.contains("<context-summary>") {
116                        parts.push(format!("[Previous Summary]: {}", content));
117                    } else {
118                        parts.push(format!("[User]: {}", content));
119                    }
120                } else if let Some(content) = msg["content"].as_array() {
121                    // Tool results are shaped as user messages with tool_result blocks.
122                    for block in content {
123                        if block["type"].as_str() == Some("tool_result") {
124                            let id = block["tool_use_id"].as_str().unwrap_or("?");
125                            let text = block["content"].as_str()
126                                .or_else(|| block["content"].as_array()
127                                    .and_then(|a| a.first())
128                                    .and_then(|b| b["text"].as_str()))
129                                .unwrap_or("");
130                            let truncated: String = text.chars().take(2000).collect();
131                            if !truncated.is_empty() {
132                                parts.push(format!("[Tool result #{}]: {}", id, truncated));
133                            }
134                        }
135                    }
136                }
137            }
138            Some("assistant") => {
139                if let Some(content) = msg["content"].as_array() {
140                    for block in content {
141                        match block["type"].as_str() {
142                            Some("thinking") => {
143                                if let Some(text) = block["thinking"].as_str() {
144                                    let preview: String = text.chars().take(500).collect();
145                                    parts.push(format!("[Assistant thinking]: {}", preview));
146                                }
147                            }
148                            Some("text") => {
149                                if let Some(text) = block["text"].as_str() {
150                                    parts.push(format!("[Assistant]: {}", text));
151                                }
152                            }
153                            Some("tool_use") => {
154                                let id = block["id"].as_str().unwrap_or("?");
155                                let name = block["name"].as_str().unwrap_or("");
156                                let input = &block["input"];
157                                if let Some(path) = input["path"].as_str() {
158                                    match name {
159                                        "read" => { file_ops.read.insert(path.to_string()); }
160                                        "write" => { file_ops.written.insert(path.to_string()); }
161                                        "edit" => { file_ops.edited.insert(path.to_string()); }
162                                        _ => {}
163                                    }
164                                }
165                                let args_str = serde_json::to_string(input).unwrap_or_default();
166                                let truncated: String = args_str.chars().take(500).collect();
167                                parts.push(format!("[Tool call #{}: {}({})]", id, name, truncated));
168                            }
169                            _ => {}
170                        }
171                    }
172                } else if let Some(content) = msg["content"].as_str() {
173                    parts.push(format!("[Assistant]: {}", content));
174                }
175            }
176            _ => {}
177        }
178    }
179
180    let conversation_text = parts.join("\n\n");
181
182    // Build file-operations summary (read-only = read but not modified).
183    let modified: std::collections::HashSet<String> =
184        file_ops.written.union(&file_ops.edited).cloned().collect();
185    let read_only: Vec<String> = file_ops.read.difference(&modified).cloned().collect();
186    let modified_list: Vec<String> = modified.into_iter().collect();
187
188    let mut file_section = String::new();
189    if !read_only.is_empty() {
190        file_section.push_str(&format!(
191            "\n\n<read-files>\n{}\n</read-files>",
192            read_only.join("\n")
193        ));
194    }
195    if !modified_list.is_empty() {
196        file_section.push_str(&format!(
197            "\n\n<modified-files>\n{}\n</modified-files>",
198            modified_list.join("\n")
199        ));
200    }
201
202    // Iterative compaction — if the first user message already contains a
203    // summary wrapper, we're compacting on top of a previous compaction.
204    let has_previous_summary = api_messages.first()
205        .and_then(|m| m["content"].as_str())
206        .is_some_and(|c| c.contains("<context-summary>"));
207
208    let base_prompt = if has_previous_summary {
209        UPDATE_SUMMARIZATION_PROMPT
210    } else {
211        SUMMARIZATION_PROMPT
212    };
213
214    let mut prompt_text = format!("<conversation>\n{}\n</conversation>\n\n", conversation_text);
215    if let Some(instructions) = custom_instructions {
216        prompt_text.push_str(&format!("{}\n\nAdditional focus: {}", base_prompt, instructions));
217    } else {
218        prompt_text.push_str(base_prompt);
219    }
220    prompt_text.push_str(&format!(
221        "\n\nAlso append these file operation records to the end of your summary:{}",
222        file_section
223    ));
224
225    let user_msg = json!({"role": "user", "content": prompt_text});
226    runtime.compact_call(vec![user_msg]).await
227}