syncable_cli/agent/
mod.rs

1//! Agent module for interactive AI-powered CLI assistance
2//!
3//! This module provides an agent layer using the Rig library that allows users
4//! to interact with the CLI through natural language conversations.
5//!
6//! # Features
7//!
8//! - **Conversation History**: Maintains context across multiple turns
9//! - **Automatic Compaction**: Compresses old history when token count exceeds threshold
10//! - **Tool Tracking**: Records tool calls for better context preservation
11//!
12//! # Usage
13//!
14//! ```bash
15//! # Interactive mode
16//! sync-ctl chat
17//!
18//! # With specific provider
19//! sync-ctl chat --provider openai --model gpt-5.2
20//!
21//! # Single query
22//! sync-ctl chat --query "What security issues does this project have?"
23//! ```
24//!
25//! # Interactive Commands
26//!
27//! - `/model` - Switch to a different AI model
28//! - `/provider` - Switch provider (prompts for API key if needed)
29//! - `/help` - Show available commands
30//! - `/clear` - Clear conversation history
31//! - `/exit` - Exit the chat
32
33pub mod commands;
34pub mod history;
35pub mod ide;
36pub mod prompts;
37pub mod session;
38pub mod tools;
39pub mod ui;
40
41use colored::Colorize;
42use history::{ConversationHistory, ToolCallRecord};
43use ide::IdeClient;
44use rig::{
45    client::{CompletionClient, ProviderClient},
46    completion::Prompt,
47    providers::{anthropic, openai},
48};
49use session::ChatSession;
50use commands::TokenUsage;
51use std::path::Path;
52use std::sync::Arc;
53use tokio::sync::Mutex as TokioMutex;
54use ui::{ResponseFormatter, ToolDisplayHook};
55
/// Provider type for the agent.
///
/// `OpenAI` is the default provider. Parsing from a string is
/// ASCII-case-insensitive (see the [`std::str::FromStr`] impl) and
/// [`std::fmt::Display`] renders the canonical lowercase name, so
/// `parse` and `to_string` round-trip.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum ProviderType {
    #[default]
    OpenAI,
    Anthropic,
}

impl std::fmt::Display for ProviderType {
    /// Writes the canonical lowercase provider name ("openai" / "anthropic").
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ProviderType::OpenAI => write!(f, "openai"),
            ProviderType::Anthropic => write!(f, "anthropic"),
        }
    }
}

impl std::str::FromStr for ProviderType {
    type Err = String;

    /// Parses a provider name, ignoring ASCII case ("OpenAI", "ANTHROPIC", ...).
    ///
    /// # Errors
    /// Returns a human-readable message for unknown provider names.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // `eq_ignore_ascii_case` avoids the per-call String allocation that
        // `to_lowercase()` incurred while accepting the same inputs.
        if s.eq_ignore_ascii_case("openai") {
            Ok(ProviderType::OpenAI)
        } else if s.eq_ignore_ascii_case("anthropic") {
            Ok(ProviderType::Anthropic)
        } else {
            Err(format!("Unknown provider: {}", s))
        }
    }
}
84
/// Error types for the agent.
///
/// Messages are rendered via `thiserror`'s `#[error(...)]` attributes,
/// so `Display` output is user-facing text.
#[derive(Debug, thiserror::Error)]
pub enum AgentError {
    /// The selected provider's API key env var is unset; the payload is the
    /// variable name the user must set.
    #[error("Missing API key. Set {0} environment variable.")]
    MissingApiKey(String),

    /// An error reported by the underlying LLM provider client.
    #[error("Provider error: {0}")]
    ProviderError(String),

    /// A failure raised by one of the agent's tools during execution.
    #[error("Tool error: {0}")]
    ToolError(String),
}

/// Convenience alias for results produced by the agent layer.
pub type AgentResult<T> = Result<T, AgentError>;
99
100/// Get the system prompt for the agent based on query type
101fn get_system_prompt(project_path: &Path, query: Option<&str>) -> String {
102    // If query suggests generation (Docker, Terraform, Helm), use DevOps prompt
103    if let Some(q) = query {
104        if prompts::is_generation_query(q) {
105            return prompts::get_devops_prompt(project_path);
106        }
107    }
108    // Default to analysis prompt
109    prompts::get_analysis_prompt(project_path)
110}
111
/// Run the agent in interactive mode with custom REPL supporting /model and /provider commands
///
/// The REPL loop:
/// 1. Shows conversation-history status and reads a line of user input.
/// 2. Dispatches `/`-prefixed commands to the session (with special handling
///    for `/clear`, which also wipes the compaction-aware history).
/// 3. Otherwise sends the input to the selected provider's agent, with
///    retry/continuation handling for rate limits, truncated responses,
///    timeouts, and tool-call checkpoints.
///
/// # Arguments
/// * `project_path` - Root of the project the analysis/generation tools operate on
/// * `provider` - Initial provider (may be switched at runtime via `/provider`)
/// * `model` - Optional model override; `ChatSession` supplies a default otherwise
///
/// # Errors
/// Returns an error only if the initial API-key prompt fails; request-time
/// errors are reported to stderr and recovered (or abandoned) inside the loop.
pub async fn run_interactive(
    project_path: &Path,
    provider: ProviderType,
    model: Option<String>,
) -> AgentResult<()> {
    use tools::*;

    let mut session = ChatSession::new(project_path, provider, model);

    // Initialize conversation history with compaction support
    let mut conversation_history = ConversationHistory::new();

    // Initialize IDE client for native diff viewing
    let ide_client: Option<Arc<TokioMutex<IdeClient>>> = {
        let mut client = IdeClient::new().await;
        if client.is_ide_available() {
            match client.connect().await {
                Ok(()) => {
                    println!(
                        "{} Connected to {} IDE companion",
                        "✓".green(),
                        client.ide_name().unwrap_or("VS Code")
                    );
                    Some(Arc::new(TokioMutex::new(client)))
                }
                Err(e) => {
                    // IDE detected but companion not running or connection failed
                    println!(
                        "{} IDE companion not connected: {}",
                        "!".yellow(),
                        e
                    );
                    None
                }
            }
        } else {
            println!("{} No IDE detected (TERM_PROGRAM={})", "·".dimmed(), std::env::var("TERM_PROGRAM").unwrap_or_default());
            None
        }
    };

    // Load API key from config file to env if not already set
    ChatSession::load_api_key_to_env(session.provider);

    // Check if API key is configured, prompt if not
    if !ChatSession::has_api_key(session.provider) {
        ChatSession::prompt_api_key(session.provider)?;
    }

    session.print_banner();

    loop {
        // Show conversation status if we have history
        if !conversation_history.is_empty() {
            println!("{}", format!("  💬 Context: {}", conversation_history.status()).dimmed());
        }

        // Read user input
        let input = match session.read_input() {
            Ok(input) => input,
            Err(_) => break,
        };

        if input.is_empty() {
            continue;
        }

        // Check for commands
        if ChatSession::is_command(&input) {
            // Special handling for /clear to also clear conversation history
            if input.trim().to_lowercase() == "/clear" || input.trim().to_lowercase() == "/c" {
                conversation_history.clear();
            }
            match session.process_command(&input) {
                Ok(true) => continue,
                Ok(false) => break, // /exit
                Err(e) => {
                    eprintln!("{}", format!("Error: {}", e).red());
                    continue;
                }
            }
        }

        // Check API key before making request (in case provider changed)
        if !ChatSession::has_api_key(session.provider) {
            eprintln!("{}", "No API key configured. Use /provider to set one.".yellow());
            continue;
        }

        // Check if compaction is needed before making the request
        if conversation_history.needs_compaction() {
            println!("{}", "  📦 Compacting conversation history...".dimmed());
            if let Some(summary) = conversation_history.compact() {
                println!("{}", format!("  ✓ Compressed {} turns", summary.matches("Turn").count()).dimmed());
            }
        }

        // Retry loop for automatic error recovery
        // MAX_RETRIES is for failures without progress
        // MAX_CONTINUATIONS is for truncations WITH progress (more generous)
        // TOOL_CALL_CHECKPOINT is the interval at which we ask user to confirm
        // MAX_TOOL_CALLS is the absolute maximum (300 = 6 checkpoints x 50)
        const MAX_RETRIES: u32 = 3;
        const MAX_CONTINUATIONS: u32 = 10;
        // NOTE(review): TOOL_CALL_CHECKPOINT is declared but never referenced in
        // this loop (checkpointing is triggered by the provider's MaxDepth error
        // instead) — confirm whether it is still needed.
        const TOOL_CALL_CHECKPOINT: usize = 50;
        const MAX_TOOL_CALLS: usize = 300;
        let mut retry_attempt = 0;
        let mut continuation_count = 0;
        let mut total_tool_calls: usize = 0;
        let mut auto_continue_tools = false; // User can select "always" to skip future prompts
        let mut current_input = input.clone();
        let mut succeeded = false;

        while retry_attempt < MAX_RETRIES && continuation_count < MAX_CONTINUATIONS && !succeeded {

            // Log if this is a continuation attempt
            if continuation_count > 0 {
                eprintln!("{}", format!("  📡 Sending continuation request...").dimmed());
            }

            // Create hook for Claude Code style tool display
            let hook = ToolDisplayHook::new();

            let project_path_buf = session.project_path.clone();
            // Select prompt based on query type (analysis vs generation)
            let preamble = get_system_prompt(&session.project_path, Some(&current_input));
            let is_generation = prompts::is_generation_query(&current_input);

            // Convert conversation history to Rig Message format
            let mut chat_history = conversation_history.to_messages();

            let response = match session.provider {
                ProviderType::OpenAI => {
                    let client = openai::Client::from_env();
                    // For GPT-5.x reasoning models, enable reasoning with summary output
                    // so we can see the model's thinking process
                    let reasoning_params = if session.model.starts_with("gpt-5") || session.model.starts_with("o1") {
                        Some(serde_json::json!({
                            "reasoning": {
                                "effort": "medium",
                                "summary": "detailed"
                            }
                        }))
                    } else {
                        None
                    };

                    let mut builder = client
                        .agent(&session.model)
                        .preamble(&preamble)
                        .max_tokens(4096)
                        .tool(AnalyzeTool::new(project_path_buf.clone()))
                        .tool(SecurityScanTool::new(project_path_buf.clone()))
                        .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                        .tool(ReadFileTool::new(project_path_buf.clone()))
                        .tool(ListDirectoryTool::new(project_path_buf.clone()));

                    // Add generation tools if this is a generation query
                    if is_generation {
                        // Create file tools with IDE client if connected
                        let (write_file_tool, write_files_tool) = if let Some(ref client) = ide_client {
                            (
                                WriteFileTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                                WriteFilesTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                            )
                        } else {
                            (
                                WriteFileTool::new(project_path_buf.clone()),
                                WriteFilesTool::new(project_path_buf.clone()),
                            )
                        };
                        builder = builder
                            .tool(write_file_tool)
                            .tool(write_files_tool)
                            .tool(ShellTool::new(project_path_buf.clone()));
                    }

                    if let Some(params) = reasoning_params {
                        builder = builder.additional_params(params);
                    }

                    let agent = builder.build();
                    // Allow up to 50 tool call turns for complex generation tasks
                    // Use hook to display tool calls as they happen
                    // Pass conversation history for context continuity
                    agent.prompt(&current_input)
                        .with_history(&mut chat_history)
                        .with_hook(hook.clone())
                        .multi_turn(50)
                        .await
                }
                ProviderType::Anthropic => {
                    let client = anthropic::Client::from_env();
                    let mut builder = client
                        .agent(&session.model)
                        .preamble(&preamble)
                        .max_tokens(4096)
                        .tool(AnalyzeTool::new(project_path_buf.clone()))
                        .tool(SecurityScanTool::new(project_path_buf.clone()))
                        .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                        .tool(ReadFileTool::new(project_path_buf.clone()))
                        .tool(ListDirectoryTool::new(project_path_buf.clone()));

                    // Add generation tools if this is a generation query
                    if is_generation {
                        // Create file tools with IDE client if connected
                        let (write_file_tool, write_files_tool) = if let Some(ref client) = ide_client {
                            (
                                WriteFileTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                                WriteFilesTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                            )
                        } else {
                            (
                                WriteFileTool::new(project_path_buf.clone()),
                                WriteFilesTool::new(project_path_buf.clone()),
                            )
                        };
                        builder = builder
                            .tool(write_file_tool)
                            .tool(write_files_tool)
                            .tool(ShellTool::new(project_path_buf.clone()));
                    }

                    let agent = builder.build();

                    // Allow up to 50 tool call turns for complex generation tasks
                    // Use hook to display tool calls as they happen
                    // Pass conversation history for context continuity
                    agent.prompt(&current_input)
                        .with_history(&mut chat_history)
                        .with_hook(hook.clone())
                        .multi_turn(50)
                        .await
                }
            };

            match response {
                Ok(text) => {
                    // Show final response
                    println!();
                    ResponseFormatter::print_response(&text);

                    // Track token usage (estimate since Rig doesn't expose exact counts)
                    let prompt_tokens = TokenUsage::estimate_tokens(&input);
                    let completion_tokens = TokenUsage::estimate_tokens(&text);
                    session.token_usage.add_request(prompt_tokens, completion_tokens);

                    // Extract tool calls from the hook state for history tracking
                    let tool_calls = extract_tool_calls_from_hook(&hook).await;
                    let batch_tool_count = tool_calls.len();
                    total_tool_calls += batch_tool_count;

                    // Show tool call summary if significant
                    if batch_tool_count > 10 {
                        println!("{}", format!("  ✓ Completed with {} tool calls ({} total this session)", batch_tool_count, total_tool_calls).dimmed());
                    }

                    // Add to conversation history with tool call records
                    conversation_history.add_turn(input.clone(), text.clone(), tool_calls);

                    // Check if this heavy turn requires immediate compaction
                    // This helps prevent context overflow in subsequent requests
                    if conversation_history.needs_compaction() {
                        println!("{}", "  📦 Compacting conversation history...".dimmed());
                        if let Some(summary) = conversation_history.compact() {
                            println!("{}", format!("  ✓ Compressed {} turns", summary.matches("Turn").count()).dimmed());
                        }
                    }

                    // Also update legacy session history for compatibility
                    session.history.push(("user".to_string(), input.clone()));
                    session.history.push(("assistant".to_string(), text));
                    succeeded = true;
                }
                Err(e) => {
                    // Error recovery is keyed off the error's string form because the
                    // underlying library error type is not matched structurally here.
                    let err_str = e.to_string();

                    println!();

                    // Check if this is a max depth error - handle as checkpoint
                    if err_str.contains("MaxDepth") || err_str.contains("max_depth") || err_str.contains("reached limit") {
                        // Extract what was done before hitting the limit
                        let completed_tools = extract_tool_calls_from_hook(&hook).await;
                        let agent_thinking = extract_agent_messages_from_hook(&hook).await;
                        let batch_tool_count = completed_tools.len();
                        total_tool_calls += batch_tool_count;

                        eprintln!("{}", format!(
                            "⚠ Reached {} tool calls this batch ({} total). Maximum allowed: {}",
                            batch_tool_count, total_tool_calls, MAX_TOOL_CALLS
                        ).yellow());

                        // Check if we've hit the absolute maximum
                        if total_tool_calls >= MAX_TOOL_CALLS {
                            eprintln!("{}", format!("Maximum tool call limit ({}) reached.", MAX_TOOL_CALLS).red());
                            eprintln!("{}", "The task is too complex. Try breaking it into smaller parts.".dimmed());
                            break;
                        }

                        // Ask user if they want to continue (unless auto-continue is enabled)
                        let should_continue = if auto_continue_tools {
                            eprintln!("{}", "  Auto-continuing (you selected 'always')...".dimmed());
                            true
                        } else {
                            eprintln!("{}", "Excessive tool calls used. Want to continue?".yellow());
                            eprintln!("{}", "  [y] Yes, continue  [n] No, stop  [a] Always continue".dimmed());
                            print!("  > ");
                            let _ = std::io::Write::flush(&mut std::io::stdout());

                            // Read user input
                            let mut response = String::new();
                            match std::io::stdin().read_line(&mut response) {
                                Ok(_) => {
                                    let resp = response.trim().to_lowercase();
                                    if resp == "a" || resp == "always" {
                                        auto_continue_tools = true;
                                        true
                                    } else {
                                        // Plain Enter defaults to "yes"
                                        resp == "y" || resp == "yes" || resp.is_empty()
                                    }
                                }
                                Err(_) => false,
                            }
                        };

                        if !should_continue {
                            eprintln!("{}", "Stopped by user. Type 'continue' to resume later.".dimmed());
                            // Add partial progress to history
                            if !completed_tools.is_empty() {
                                conversation_history.add_turn(
                                    current_input.clone(),
                                    format!("[Stopped at checkpoint - {} tools completed]", batch_tool_count),
                                    vec![]
                                );
                            }
                            break;
                        }

                        // Continue from checkpoint
                        eprintln!("{}", format!(
                            "  → Continuing... {} remaining tool calls available",
                            MAX_TOOL_CALLS - total_tool_calls
                        ).dimmed());

                        // Add partial progress to history (without duplicating tool calls)
                        conversation_history.add_turn(
                            current_input.clone(),
                            format!("[Checkpoint - {} tools completed, continuing...]", batch_tool_count),
                            vec![]
                        );

                        // Build continuation prompt
                        current_input = build_continuation_prompt(&input, &completed_tools, &agent_thinking);

                        // Brief delay before continuation
                        tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
                        continue; // Continue the loop without incrementing retry_attempt
                    } else if err_str.contains("rate") || err_str.contains("Rate") || err_str.contains("429") {
                        eprintln!("{}", "⚠ Rate limited by API provider.".yellow());
                        // Wait before retry for rate limits
                        retry_attempt += 1;
                        eprintln!("{}", format!("  Waiting 5 seconds before retry ({}/{})...", retry_attempt, MAX_RETRIES).dimmed());
                        tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
                    } else if is_truncation_error(&err_str) {
                        // Truncation error - try intelligent continuation
                        let completed_tools = extract_tool_calls_from_hook(&hook).await;
                        let agent_thinking = extract_agent_messages_from_hook(&hook).await;

                        // Count actually completed tools (not in-progress)
                        let completed_count = completed_tools.iter()
                            .filter(|t| !t.result_summary.contains("IN PROGRESS"))
                            .count();
                        let in_progress_count = completed_tools.len() - completed_count;

                        if !completed_tools.is_empty() && continuation_count < MAX_CONTINUATIONS {
                            // We have partial progress - continue from where we left off
                            continuation_count += 1;
                            let status_msg = if in_progress_count > 0 {
                                format!(
                                    "⚠ Response truncated. {} completed, {} in-progress. Auto-continuing ({}/{})...",
                                    completed_count, in_progress_count, continuation_count, MAX_CONTINUATIONS
                                )
                            } else {
                                format!(
                                    "⚠ Response truncated. {} tool calls completed. Auto-continuing ({}/{})...",
                                    completed_count, continuation_count, MAX_CONTINUATIONS
                                )
                            };
                            eprintln!("{}", status_msg.yellow());

                            // Add partial progress to conversation history
                            // NOTE: We intentionally pass empty tool_calls here because the
                            // continuation prompt already contains the detailed file list.
                            // Including them in history would duplicate the context and waste tokens.
                            conversation_history.add_turn(
                                current_input.clone(),
                                format!("[Partial response - {} tools completed, {} in-progress before truncation. See continuation prompt for details.]",
                                    completed_count, in_progress_count),
                                vec![]  // Don't duplicate - continuation prompt has the details
                            );

                            // Check if we need compaction after adding this heavy turn
                            // This is important for long multi-turn sessions with many tool calls
                            if conversation_history.needs_compaction() {
                                eprintln!("{}", "  📦 Compacting history before continuation...".dimmed());
                                if let Some(summary) = conversation_history.compact() {
                                    eprintln!("{}", format!("  ✓ Compressed {} turns", summary.matches("Turn").count()).dimmed());
                                }
                            }

                            // Build continuation prompt with context
                            current_input = build_continuation_prompt(&input, &completed_tools, &agent_thinking);

                            // Log continuation details for debugging
                            eprintln!("{}", format!(
                                "  → Continuing with {} files read, {} written, {} other actions tracked",
                                completed_tools.iter().filter(|t| t.tool_name == "read_file").count(),
                                completed_tools.iter().filter(|t| t.tool_name == "write_file" || t.tool_name == "write_files").count(),
                                completed_tools.iter().filter(|t| t.tool_name != "read_file" && t.tool_name != "write_file" && t.tool_name != "write_files" && t.tool_name != "list_directory").count()
                            ).dimmed());

                            // Brief delay before continuation
                            tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
                            // Don't increment retry_attempt - this is progress via continuation
                        } else if retry_attempt < MAX_RETRIES {
                            // No tool calls completed - simple retry
                            retry_attempt += 1;
                            eprintln!("{}", format!("⚠ Response error (attempt {}/{}). Retrying...", retry_attempt, MAX_RETRIES).yellow());
                            tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
                        } else {
                            // Max retries/continuations reached
                            eprintln!("{}", format!("Error: {}", e).red());
                            if continuation_count >= MAX_CONTINUATIONS {
                                eprintln!("{}", format!("Max continuations ({}) reached. The task is too complex for one request.", MAX_CONTINUATIONS).dimmed());
                            } else {
                                eprintln!("{}", "Max retries reached. The response may be too complex.".dimmed());
                            }
                            eprintln!("{}", "Try breaking your request into smaller parts.".dimmed());
                            break;
                        }
                    } else if err_str.contains("timeout") || err_str.contains("Timeout") {
                        // Timeout - simple retry
                        retry_attempt += 1;
                        if retry_attempt < MAX_RETRIES {
                            eprintln!("{}", format!("⚠ Request timed out (attempt {}/{}). Retrying...", retry_attempt, MAX_RETRIES).yellow());
                            tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
                        } else {
                            eprintln!("{}", "Request timed out. Please try again.".red());
                            break;
                        }
                    } else {
                        // Unknown error - show details and break
                        eprintln!("{}", format!("Error: {}", e).red());
                        if continuation_count > 0 {
                            eprintln!("{}", format!("  (occurred during continuation attempt {})", continuation_count).dimmed());
                        }
                        eprintln!("{}", "Error details for debugging:".dimmed());
                        eprintln!("{}", format!("  - retry_attempt: {}/{}", retry_attempt, MAX_RETRIES).dimmed());
                        eprintln!("{}", format!("  - continuation_count: {}/{}", continuation_count, MAX_CONTINUATIONS).dimmed());
                        break;
                    }
                }
            }
        }
        println!();
    }

    Ok(())
}
586
587/// Extract tool call records from the hook state for history tracking
588async fn extract_tool_calls_from_hook(hook: &ToolDisplayHook) -> Vec<ToolCallRecord> {
589    let state = hook.state();
590    let guard = state.lock().await;
591
592    guard.tool_calls.iter().enumerate().map(|(i, tc)| {
593        let result = if tc.is_running {
594            // Tool was in progress when error occurred
595            "[IN PROGRESS - may need to be re-run]".to_string()
596        } else if let Some(output) = &tc.output {
597            truncate_string(output, 200)
598        } else {
599            "completed".to_string()
600        };
601
602        ToolCallRecord {
603            tool_name: tc.name.clone(),
604            args_summary: truncate_string(&tc.args, 100),
605            result_summary: result,
606            // Generate a unique tool ID for proper message pairing
607            tool_id: Some(format!("tool_{}_{}", tc.name, i)),
608        }
609    }).collect()
610}
611
612/// Extract any agent thinking/messages from the hook for context
613async fn extract_agent_messages_from_hook(hook: &ToolDisplayHook) -> Vec<String> {
614    let state = hook.state();
615    let guard = state.lock().await;
616    guard.agent_messages.clone()
617}
618
/// Helper to truncate strings for summaries.
///
/// Returns `s` unchanged when it fits in `max_len` bytes; otherwise returns
/// a prefix followed by `"..."`. The cut point is backed off to a UTF-8 char
/// boundary, so multibyte input (emoji, accented text in tool output) can
/// never cause a slicing panic. ASCII behavior is unchanged.
fn truncate_string(s: &str, max_len: usize) -> String {
    if s.len() <= max_len {
        return s.to_string();
    }
    // Reserve 3 bytes for the "..." suffix (saturating for tiny max_len,
    // matching the previous behavior for max_len < 3).
    let mut cut = max_len.saturating_sub(3);
    // Byte-slicing at a non-boundary panics; walk back to a valid boundary.
    while cut > 0 && !s.is_char_boundary(cut) {
        cut -= 1;
    }
    format!("{}...", &s[..cut])
}
627
/// Check if an error is a truncation/JSON parsing error that can be recovered via continuation.
///
/// Matches the same substrings the retry loop relies on; any one marker in
/// the error text is enough.
fn is_truncation_error(err_str: &str) -> bool {
    const MARKERS: [&str; 4] = [
        "JsonError",
        "EOF while parsing",
        "JSON",
        "unexpected end",
    ];
    MARKERS.iter().any(|marker| err_str.contains(marker))
}
635
636/// Build a continuation prompt that tells the AI what work was completed
637/// and asks it to continue from where it left off
638fn build_continuation_prompt(
639    original_task: &str,
640    completed_tools: &[ToolCallRecord],
641    agent_thinking: &[String],
642) -> String {
643    use std::collections::HashSet;
644
645    // Group tools by type and extract unique files read
646    let mut files_read: HashSet<String> = HashSet::new();
647    let mut files_written: HashSet<String> = HashSet::new();
648    let mut dirs_listed: HashSet<String> = HashSet::new();
649    let mut other_tools: Vec<String> = Vec::new();
650    let mut in_progress: Vec<String> = Vec::new();
651
652    for tool in completed_tools {
653        let is_in_progress = tool.result_summary.contains("IN PROGRESS");
654
655        if is_in_progress {
656            in_progress.push(format!("{}({})", tool.tool_name, tool.args_summary));
657            continue;
658        }
659
660        match tool.tool_name.as_str() {
661            "read_file" => {
662                // Extract path from args
663                files_read.insert(tool.args_summary.clone());
664            }
665            "write_file" | "write_files" => {
666                files_written.insert(tool.args_summary.clone());
667            }
668            "list_directory" => {
669                dirs_listed.insert(tool.args_summary.clone());
670            }
671            _ => {
672                other_tools.push(format!("{}({})", tool.tool_name, truncate_string(&tool.args_summary, 40)));
673            }
674        }
675    }
676
677    let mut prompt = format!(
678        "[CONTINUE] Your previous response was interrupted. DO NOT repeat completed work.\n\n\
679        Original task: {}\n",
680        truncate_string(original_task, 500)
681    );
682
683    // Show files already read - CRITICAL for preventing re-reads
684    if !files_read.is_empty() {
685        prompt.push_str("\n== FILES ALREADY READ (do NOT read again) ==\n");
686        for file in &files_read {
687            prompt.push_str(&format!("  - {}\n", file));
688        }
689    }
690
691    if !dirs_listed.is_empty() {
692        prompt.push_str("\n== DIRECTORIES ALREADY LISTED ==\n");
693        for dir in &dirs_listed {
694            prompt.push_str(&format!("  - {}\n", dir));
695        }
696    }
697
698    if !files_written.is_empty() {
699        prompt.push_str("\n== FILES ALREADY WRITTEN ==\n");
700        for file in &files_written {
701            prompt.push_str(&format!("  - {}\n", file));
702        }
703    }
704
705    if !other_tools.is_empty() {
706        prompt.push_str("\n== OTHER COMPLETED ACTIONS ==\n");
707        for tool in other_tools.iter().take(20) {
708            prompt.push_str(&format!("  - {}\n", tool));
709        }
710        if other_tools.len() > 20 {
711            prompt.push_str(&format!("  ... and {} more\n", other_tools.len() - 20));
712        }
713    }
714
715    if !in_progress.is_empty() {
716        prompt.push_str("\n== INTERRUPTED (may need re-run) ==\n");
717        for tool in &in_progress {
718            prompt.push_str(&format!("  โš  {}\n", tool));
719        }
720    }
721
722    // Include last thinking context if available
723    if !agent_thinking.is_empty() {
724        if let Some(last_thought) = agent_thinking.last() {
725            prompt.push_str(&format!(
726                "\n== YOUR LAST THOUGHTS ==\n\"{}\"\n",
727                truncate_string(last_thought, 300)
728            ));
729        }
730    }
731
732    prompt.push_str("\n== INSTRUCTIONS ==\n");
733    prompt.push_str("IMPORTANT: Your previous response was too long and got cut off.\n");
734    prompt.push_str("1. Do NOT re-read files listed above - they are already in context.\n");
735    prompt.push_str("2. If writing a document, write it in SECTIONS - complete one section now, then continue.\n");
736    prompt.push_str("3. Keep your response SHORT and focused. Better to complete small chunks than fail on large ones.\n");
737    prompt.push_str("4. If the task involves writing a file, START WRITING NOW - don't explain what you'll do.\n");
738
739    prompt
740}
741
742/// Run a single query and return the response
743pub async fn run_query(
744    project_path: &Path,
745    query: &str,
746    provider: ProviderType,
747    model: Option<String>,
748) -> AgentResult<String> {
749    use tools::*;
750
751    let project_path_buf = project_path.to_path_buf();
752    // Select prompt based on query type (analysis vs generation)
753    let preamble = get_system_prompt(project_path, Some(query));
754    let is_generation = prompts::is_generation_query(query);
755
756    match provider {
757        ProviderType::OpenAI => {
758            let client = openai::Client::from_env();
759            let model_name = model.as_deref().unwrap_or("gpt-5.2");
760
761            // For GPT-5.x reasoning models, enable reasoning with summary output
762            let reasoning_params = if model_name.starts_with("gpt-5") || model_name.starts_with("o1") {
763                Some(serde_json::json!({
764                    "reasoning": {
765                        "effort": "medium",
766                        "summary": "detailed"
767                    }
768                }))
769            } else {
770                None
771            };
772
773            let mut builder = client
774                .agent(model_name)
775                .preamble(&preamble)
776                .max_tokens(4096)
777                .tool(AnalyzeTool::new(project_path_buf.clone()))
778                .tool(SecurityScanTool::new(project_path_buf.clone()))
779                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
780                .tool(ReadFileTool::new(project_path_buf.clone()))
781                .tool(ListDirectoryTool::new(project_path_buf.clone()));
782
783            // Add generation tools if this is a generation query
784            if is_generation {
785                builder = builder
786                    .tool(WriteFileTool::new(project_path_buf.clone()))
787                    .tool(WriteFilesTool::new(project_path_buf.clone()))
788                    .tool(ShellTool::new(project_path_buf.clone()));
789            }
790
791            if let Some(params) = reasoning_params {
792                builder = builder.additional_params(params);
793            }
794
795            let agent = builder.build();
796
797            agent
798                .prompt(query)
799                .multi_turn(50)
800                .await
801                .map_err(|e| AgentError::ProviderError(e.to_string()))
802        }
803        ProviderType::Anthropic => {
804            let client = anthropic::Client::from_env();
805            let model_name = model.as_deref().unwrap_or("claude-sonnet-4-20250514");
806
807            let mut builder = client
808                .agent(model_name)
809                .preamble(&preamble)
810                .max_tokens(4096)
811                .tool(AnalyzeTool::new(project_path_buf.clone()))
812                .tool(SecurityScanTool::new(project_path_buf.clone()))
813                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
814                .tool(ReadFileTool::new(project_path_buf.clone()))
815                .tool(ListDirectoryTool::new(project_path_buf.clone()));
816
817            // Add generation tools if this is a generation query
818            if is_generation {
819                builder = builder
820                    .tool(WriteFileTool::new(project_path_buf.clone()))
821                    .tool(WriteFilesTool::new(project_path_buf.clone()))
822                    .tool(ShellTool::new(project_path_buf.clone()));
823            }
824
825            let agent = builder.build();
826
827            agent
828                .prompt(query)
829                .multi_turn(50)
830                .await
831                .map_err(|e| AgentError::ProviderError(e.to_string()))
832        }
833    }
834}