// syncable_cli/agent/mod.rs

1//! Agent module for interactive AI-powered CLI assistance
2//!
3//! This module provides an agent layer using the Rig library that allows users
4//! to interact with the CLI through natural language conversations.
5//!
6//! # Features
7//!
8//! - **Conversation History**: Maintains context across multiple turns
9//! - **Automatic Compaction**: Compresses old history when token count exceeds threshold
10//! - **Tool Tracking**: Records tool calls for better context preservation
11//!
12//! # Usage
13//!
14//! ```bash
15//! # Interactive mode
16//! sync-ctl chat
17//!
18//! # With specific provider
19//! sync-ctl chat --provider openai --model gpt-5.2
20//!
21//! # Single query
22//! sync-ctl chat --query "What security issues does this project have?"
23//! ```
24//!
25//! # Interactive Commands
26//!
27//! - `/model` - Switch to a different AI model
28//! - `/provider` - Switch provider (prompts for API key if needed)
29//! - `/help` - Show available commands
30//! - `/clear` - Clear conversation history
31//! - `/exit` - Exit the chat
32
33pub mod commands;
34pub mod history;
35pub mod ide;
36pub mod prompts;
37pub mod session;
38pub mod tools;
39pub mod ui;
40
41use colored::Colorize;
42use history::{ConversationHistory, ToolCallRecord};
43use ide::IdeClient;
44use rig::{
45    client::{CompletionClient, ProviderClient},
46    completion::Prompt,
47    providers::{anthropic, openai},
48};
49use session::ChatSession;
50use commands::TokenUsage;
51use std::path::Path;
52use std::sync::Arc;
53use tokio::sync::Mutex as TokioMutex;
54use ui::{ResponseFormatter, ToolDisplayHook};
55
/// Provider type for the agent
///
/// Selects which LLM backend a chat session talks to. Defaults to
/// [`ProviderType::OpenAI`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum ProviderType {
    /// OpenAI models; the default provider.
    #[default]
    OpenAI,
    /// Anthropic Claude models.
    Anthropic,
}
63
64impl std::fmt::Display for ProviderType {
65    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
66        match self {
67            ProviderType::OpenAI => write!(f, "openai"),
68            ProviderType::Anthropic => write!(f, "anthropic"),
69        }
70    }
71}
72
73impl std::str::FromStr for ProviderType {
74    type Err = String;
75
76    fn from_str(s: &str) -> Result<Self, Self::Err> {
77        match s.to_lowercase().as_str() {
78            "openai" => Ok(ProviderType::OpenAI),
79            "anthropic" => Ok(ProviderType::Anthropic),
80            _ => Err(format!("Unknown provider: {}", s)),
81        }
82    }
83}
84
/// Error types for the agent
#[derive(Debug, thiserror::Error)]
pub enum AgentError {
    /// No API key was found for the selected provider; the payload is the
    /// environment variable name the user should set.
    #[error("Missing API key. Set {0} environment variable.")]
    MissingApiKey(String),

    /// Error surfaced by the underlying LLM provider (network, auth, model).
    #[error("Provider error: {0}")]
    ProviderError(String),

    /// Error raised while executing one of the agent's tools.
    #[error("Tool error: {0}")]
    ToolError(String),
}
97
/// Convenience alias for results returned by agent entry points.
pub type AgentResult<T> = Result<T, AgentError>;
99
100/// Get the system prompt for the agent based on query type
101fn get_system_prompt(project_path: &Path, query: Option<&str>) -> String {
102    // If query suggests generation (Docker, Terraform, Helm), use DevOps prompt
103    if let Some(q) = query {
104        if prompts::is_generation_query(q) {
105            return prompts::get_devops_prompt(project_path);
106        }
107    }
108    // Default to analysis prompt
109    prompts::get_analysis_prompt(project_path)
110}
111
/// Run the agent in interactive mode with custom REPL supporting /model and /provider commands
///
/// Each loop turn: read input, handle slash commands, compact the history
/// when it exceeds its threshold, build a provider-specific Rig agent with
/// the analysis toolset (plus generation tools for generation-style
/// queries), prompt it with the accumulated chat history, then record the
/// response and any tool calls back into the history.
///
/// # Errors
///
/// Returns an error only when prompting for a missing API key fails at
/// startup; per-turn provider errors are printed and the loop continues.
pub async fn run_interactive(
    project_path: &Path,
    provider: ProviderType,
    model: Option<String>,
) -> AgentResult<()> {
    use tools::*;

    let mut session = ChatSession::new(project_path, provider, model);

    // Initialize conversation history with compaction support
    let mut conversation_history = ConversationHistory::new();

    // Initialize IDE client for native diff viewing.
    // Wrapped in Arc<TokioMutex<..>> so WriteFileTool can share it across
    // async tool invocations.
    let ide_client: Option<Arc<TokioMutex<IdeClient>>> = {
        let mut client = IdeClient::new().await;
        if client.is_ide_available() {
            match client.connect().await {
                Ok(()) => {
                    println!(
                        "{} Connected to {} IDE companion",
                        "✓".green(),
                        client.ide_name().unwrap_or("VS Code")
                    );
                    Some(Arc::new(TokioMutex::new(client)))
                }
                Err(_) => {
                    // IDE detected but companion not running - that's fine
                    None
                }
            }
        } else {
            None
        }
    };

    // Load API key from config file to env if not already set
    ChatSession::load_api_key_to_env(session.provider);

    // Check if API key is configured, prompt if not
    if !ChatSession::has_api_key(session.provider) {
        ChatSession::prompt_api_key(session.provider)?;
    }

    session.print_banner();

    loop {
        // Show conversation status if we have history
        if !conversation_history.is_empty() {
            println!("{}", format!("  💬 Context: {}", conversation_history.status()).dimmed());
        }

        // Read user input; an input error (e.g. EOF/Ctrl-D) ends the session.
        let input = match session.read_input() {
            Ok(input) => input,
            Err(_) => break,
        };

        if input.is_empty() {
            continue;
        }

        // Check for commands
        if ChatSession::is_command(&input) {
            // Special handling for /clear to also clear conversation history
            if input.trim().to_lowercase() == "/clear" || input.trim().to_lowercase() == "/c" {
                conversation_history.clear();
            }
            match session.process_command(&input) {
                Ok(true) => continue,
                Ok(false) => break, // /exit
                Err(e) => {
                    eprintln!("{}", format!("Error: {}", e).red());
                    continue;
                }
            }
        }

        // Check API key before making request (in case provider changed)
        if !ChatSession::has_api_key(session.provider) {
            eprintln!("{}", "No API key configured. Use /provider to set one.".yellow());
            continue;
        }

        // Check if compaction is needed before making the request
        if conversation_history.needs_compaction() {
            println!("{}", "  📦 Compacting conversation history...".dimmed());
            if let Some(summary) = conversation_history.compact() {
                println!("{}", format!("  ✓ Compressed {} turns", summary.matches("Turn").count()).dimmed());
            }
        }

        // Create hook for Claude Code style tool display
        let hook = ToolDisplayHook::new();

        let project_path_buf = session.project_path.clone();
        // Select prompt based on query type (analysis vs generation)
        let preamble = get_system_prompt(&session.project_path, Some(&input));
        let is_generation = prompts::is_generation_query(&input);

        // Convert conversation history to Rig Message format
        let mut chat_history = conversation_history.to_messages();

        let response = match session.provider {
            ProviderType::OpenAI => {
                let client = openai::Client::from_env();
                // For GPT-5.x reasoning models, enable reasoning with summary output
                // so we can see the model's thinking process
                let reasoning_params = if session.model.starts_with("gpt-5") || session.model.starts_with("o1") {
                    Some(serde_json::json!({
                        "reasoning": {
                            "effort": "medium",
                            "summary": "detailed"
                        }
                    }))
                } else {
                    None
                };

                let mut builder = client
                    .agent(&session.model)
                    .preamble(&preamble)
                    .max_tokens(4096)
                    .tool(AnalyzeTool::new(project_path_buf.clone()))
                    .tool(SecurityScanTool::new(project_path_buf.clone()))
                    .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                    .tool(ReadFileTool::new(project_path_buf.clone()))
                    .tool(ListDirectoryTool::new(project_path_buf.clone()));

                // Add generation tools if this is a generation query
                if is_generation {
                    // Create WriteFileTool with IDE client if connected
                    let write_file_tool = if let Some(ref client) = ide_client {
                        WriteFileTool::new(project_path_buf.clone())
                            .with_ide_client(client.clone())
                    } else {
                        WriteFileTool::new(project_path_buf.clone())
                    };
                    builder = builder
                        .tool(write_file_tool)
                        .tool(WriteFilesTool::new(project_path_buf.clone()))
                        .tool(ShellTool::new(project_path_buf.clone()));
                }

                if let Some(params) = reasoning_params {
                    builder = builder.additional_params(params);
                }

                let agent = builder.build();
                // Allow up to 50 tool call turns for complex generation tasks
                // Use hook to display tool calls as they happen
                // Pass conversation history for context continuity
                agent.prompt(&input)
                    .with_history(&mut chat_history)
                    .with_hook(hook.clone())
                    .multi_turn(50)
                    .await
            }
            ProviderType::Anthropic => {
                let client = anthropic::Client::from_env();
                let mut builder = client
                    .agent(&session.model)
                    .preamble(&preamble)
                    .max_tokens(4096)
                    .tool(AnalyzeTool::new(project_path_buf.clone()))
                    .tool(SecurityScanTool::new(project_path_buf.clone()))
                    .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                    .tool(ReadFileTool::new(project_path_buf.clone()))
                    .tool(ListDirectoryTool::new(project_path_buf.clone()));

                // Add generation tools if this is a generation query
                if is_generation {
                    // Create WriteFileTool with IDE client if connected
                    let write_file_tool = if let Some(ref client) = ide_client {
                        WriteFileTool::new(project_path_buf.clone())
                            .with_ide_client(client.clone())
                    } else {
                        WriteFileTool::new(project_path_buf.clone())
                    };
                    builder = builder
                        .tool(write_file_tool)
                        .tool(WriteFilesTool::new(project_path_buf.clone()))
                        .tool(ShellTool::new(project_path_buf.clone()));
                }

                let agent = builder.build();

                // Allow up to 50 tool call turns for complex generation tasks
                // Use hook to display tool calls as they happen
                // Pass conversation history for context continuity
                agent.prompt(&input)
                    .with_history(&mut chat_history)
                    .with_hook(hook.clone())
                    .multi_turn(50)
                    .await
            }
        };

        match response {
            Ok(text) => {
                // Show final response
                println!();
                ResponseFormatter::print_response(&text);

                // Track token usage (estimate since Rig doesn't expose exact counts)
                let prompt_tokens = TokenUsage::estimate_tokens(&input);
                let completion_tokens = TokenUsage::estimate_tokens(&text);
                session.token_usage.add_request(prompt_tokens, completion_tokens);

                // Extract tool calls from the hook state for history tracking
                let tool_calls = extract_tool_calls_from_hook(&hook).await;

                // Add to conversation history with tool call records
                conversation_history.add_turn(input.clone(), text.clone(), tool_calls);

                // Also update legacy session history for compatibility
                session.history.push(("user".to_string(), input));
                session.history.push(("assistant".to_string(), text));
            }
            Err(e) => {
                let err_str = e.to_string();
                println!();
                // Check if this is a max depth error.
                // NOTE(review): matched against the error's Display text —
                // brittle if Rig changes its wording; confirm on upgrades.
                if err_str.contains("MaxDepth") || err_str.contains("max_depth") || err_str.contains("reached limit") {
                    eprintln!("{}", "Reached tool call limit (50 turns).".yellow());
                    eprintln!("{}", "Type 'continue' to resume, or ask a new question.".dimmed());
                } else {
                    eprintln!("{}", format!("Error: {}", e).red());
                }
            }
        }
        println!();
    }

    Ok(())
}
348
349/// Extract tool call records from the hook state for history tracking
350async fn extract_tool_calls_from_hook(hook: &ToolDisplayHook) -> Vec<ToolCallRecord> {
351    let state = hook.state();
352    let guard = state.lock().await;
353
354    guard.tool_calls.iter().map(|tc| {
355        ToolCallRecord {
356            tool_name: tc.name.clone(),
357            args_summary: truncate_string(&tc.args, 100),
358            result_summary: tc.output.as_ref()
359                .map(|o| truncate_string(o, 200))
360                .unwrap_or_else(|| "completed".to_string()),
361        }
362    }).collect()
363}
364
/// Helper to truncate strings for summaries
///
/// Returns `s` unchanged when it fits within `max_len` bytes; otherwise
/// returns a prefix followed by `"..."`. The prefix is cut on a UTF-8
/// character boundary, so multi-byte input (emoji, CJK, accented text in
/// tool args/outputs) can never cause a slice panic — the previous
/// `&s[..n]` byte slice panicked when `n` fell mid-character.
fn truncate_string(s: &str, max_len: usize) -> String {
    if s.len() <= max_len {
        return s.to_string();
    }
    // Reserve 3 bytes for the ellipsis, then back up to the nearest
    // char boundary so the slice below is always valid.
    let mut cut = max_len.saturating_sub(3);
    while cut > 0 && !s.is_char_boundary(cut) {
        cut -= 1;
    }
    format!("{}...", &s[..cut])
}
373
/// Run a single query and return the response
///
/// One-shot (non-interactive) counterpart of [`run_interactive`]: builds
/// the same provider-specific toolset — plus generation tools when the
/// query looks like a generation request — and returns the agent's final
/// response text. No IDE integration or conversation history is used here.
///
/// # Errors
///
/// Returns [`AgentError::ProviderError`] if the underlying provider call fails.
pub async fn run_query(
    project_path: &Path,
    query: &str,
    provider: ProviderType,
    model: Option<String>,
) -> AgentResult<String> {
    use tools::*;

    let project_path_buf = project_path.to_path_buf();
    // Select prompt based on query type (analysis vs generation)
    let preamble = get_system_prompt(project_path, Some(query));
    let is_generation = prompts::is_generation_query(query);

    match provider {
        ProviderType::OpenAI => {
            let client = openai::Client::from_env();
            // Default model when none is specified.
            let model_name = model.as_deref().unwrap_or("gpt-5.2");

            // For GPT-5.x reasoning models, enable reasoning with summary output
            let reasoning_params = if model_name.starts_with("gpt-5") || model_name.starts_with("o1") {
                Some(serde_json::json!({
                    "reasoning": {
                        "effort": "medium",
                        "summary": "detailed"
                    }
                }))
            } else {
                None
            };

            let mut builder = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()));

            // Add generation tools if this is a generation query
            if is_generation {
                builder = builder
                    .tool(WriteFileTool::new(project_path_buf.clone()))
                    .tool(WriteFilesTool::new(project_path_buf.clone()))
                    .tool(ShellTool::new(project_path_buf.clone()));
            }

            if let Some(params) = reasoning_params {
                builder = builder.additional_params(params);
            }

            let agent = builder.build();

            // Allow up to 50 tool call turns for complex generation tasks
            agent
                .prompt(query)
                .multi_turn(50)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
        ProviderType::Anthropic => {
            let client = anthropic::Client::from_env();
            // Default model when none is specified.
            let model_name = model.as_deref().unwrap_or("claude-sonnet-4-20250514");

            let mut builder = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()));

            // Add generation tools if this is a generation query
            if is_generation {
                builder = builder
                    .tool(WriteFileTool::new(project_path_buf.clone()))
                    .tool(WriteFilesTool::new(project_path_buf.clone()))
                    .tool(ShellTool::new(project_path_buf.clone()));
            }

            let agent = builder.build();

            // Allow up to 50 tool call turns for complex generation tasks
            agent
                .prompt(query)
                .multi_turn(50)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
    }
}
466}