syncable_cli/agent/
mod.rs

//! Agent module for interactive AI-powered CLI assistance
//!
//! This module provides an agent layer, built on the Rig library, that allows
//! users to interact with the CLI through natural language conversations.
//!
//! # Features
//!
//! - **Conversation History**: Maintains context across multiple turns
//! - **Automatic Compaction**: Compresses old history when the token count exceeds a threshold
//! - **Tool Tracking**: Records tool calls for better context preservation
//!
//! # Usage
//!
//! ```bash
//! # Interactive mode
//! sync-ctl chat
//!
//! # With a specific provider
//! sync-ctl chat --provider openai --model gpt-5.2
//!
//! # Single query
//! sync-ctl chat --query "What security issues does this project have?"
//! ```
//!
//! # Interactive Commands
//!
//! - `/model` - Switch to a different AI model
//! - `/provider` - Switch provider (prompts for API key if needed)
//! - `/help` - Show available commands
//! - `/clear` - Clear conversation history
//! - `/exit` - Exit the chat
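//!
//! # Programmatic Usage
//!
//! A minimal sketch of calling [`run_query`] directly. The crate and module
//! paths below are assumptions for illustration (a crate named `syncable_cli`
//! exposing this module as `agent`); adjust them to the actual crate layout.
//!
//! ```no_run
//! use std::path::Path;
//! // Assumed import path; adjust to the real crate layout.
//! use syncable_cli::agent::{run_query, ProviderType};
//!
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
//! let answer = run_query(
//!     Path::new("."),
//!     "What security issues does this project have?",
//!     ProviderType::OpenAI,
//!     None, // use the provider's default model
//! )
//! .await?;
//! println!("{answer}");
//! # Ok(())
//! # }
//! ```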

pub mod commands;
pub mod history;
pub mod prompts;
pub mod session;
pub mod tools;
pub mod ui;

use colored::Colorize;
use commands::TokenUsage;
use history::{ConversationHistory, ToolCallRecord};
use rig::{
    client::{CompletionClient, ProviderClient},
    completion::Prompt,
    providers::{anthropic, openai},
};
use session::ChatSession;
use std::path::Path;
use ui::{ResponseFormatter, ToolDisplayHook};

/// Provider type for the agent
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum ProviderType {
    #[default]
    OpenAI,
    Anthropic,
}

impl std::fmt::Display for ProviderType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ProviderType::OpenAI => write!(f, "openai"),
            ProviderType::Anthropic => write!(f, "anthropic"),
        }
    }
}

impl std::str::FromStr for ProviderType {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "openai" => Ok(ProviderType::OpenAI),
            "anthropic" => Ok(ProviderType::Anthropic),
            _ => Err(format!("Unknown provider: {}", s)),
        }
    }
}
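
// A small illustrative check of the `ProviderType` parse/display round trip;
// a sketch using the standard test harness, not an exhaustive suite.
#[cfg(test)]
mod provider_type_tests {
    use super::ProviderType;

    #[test]
    fn parses_case_insensitively_and_displays_lowercase() {
        // `FromStr` lowercases its input before matching...
        let p: ProviderType = "OpenAI".parse().unwrap();
        assert_eq!(p, ProviderType::OpenAI);
        // ...and `Display` always emits the canonical lowercase name.
        assert_eq!(p.to_string(), "openai");
        assert_eq!("anthropic".parse::<ProviderType>(), Ok(ProviderType::Anthropic));
        assert!("gemini".parse::<ProviderType>().is_err());
    }

    #[test]
    fn default_provider_is_openai() {
        assert_eq!(ProviderType::default(), ProviderType::OpenAI);
    }
}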

/// Error types for the agent
#[derive(Debug, thiserror::Error)]
pub enum AgentError {
    #[error("Missing API key. Set {0} environment variable.")]
    MissingApiKey(String),

    #[error("Provider error: {0}")]
    ProviderError(String),

    #[error("Tool error: {0}")]
    ToolError(String),
}

pub type AgentResult<T> = Result<T, AgentError>;

/// Get the system prompt for the agent based on query type
fn get_system_prompt(project_path: &Path, query: Option<&str>) -> String {
    // If query suggests generation (Docker, Terraform, Helm), use DevOps prompt
    if let Some(q) = query {
        if prompts::is_generation_query(q) {
            return prompts::get_devops_prompt(project_path);
        }
    }
    // Default to analysis prompt
    prompts::get_analysis_prompt(project_path)
}

/// Run the agent in interactive mode with custom REPL supporting /model and /provider commands
pub async fn run_interactive(
    project_path: &Path,
    provider: ProviderType,
    model: Option<String>,
) -> AgentResult<()> {
    use tools::*;

    let mut session = ChatSession::new(project_path, provider, model);

    // Initialize conversation history with compaction support
    let mut conversation_history = ConversationHistory::new();

    // Load API key from config file to env if not already set
    ChatSession::load_api_key_to_env(session.provider);

    // Check if API key is configured, prompt if not
    if !ChatSession::has_api_key(session.provider) {
        ChatSession::prompt_api_key(session.provider)?;
    }

    session.print_banner();

    loop {
        // Show conversation status if we have history
        if !conversation_history.is_empty() {
            println!("{}", format!("  💬 Context: {}", conversation_history.status()).dimmed());
        }

        // Read user input
        let input = match session.read_input() {
            Ok(input) => input,
            Err(_) => break,
        };

        if input.is_empty() {
            continue;
        }

        // Check for commands
        if ChatSession::is_command(&input) {
            // Special handling for /clear to also clear conversation history
            if input.trim().to_lowercase() == "/clear" || input.trim().to_lowercase() == "/c" {
                conversation_history.clear();
            }
            match session.process_command(&input) {
                Ok(true) => continue,
                Ok(false) => break, // /exit
                Err(e) => {
                    eprintln!("{}", format!("Error: {}", e).red());
                    continue;
                }
            }
        }

        // Check API key before making request (in case provider changed)
        if !ChatSession::has_api_key(session.provider) {
            eprintln!("{}", "No API key configured. Use /provider to set one.".yellow());
            continue;
        }

        // Check if compaction is needed before making the request
        if conversation_history.needs_compaction() {
            println!("{}", "  📦 Compacting conversation history...".dimmed());
            if let Some(summary) = conversation_history.compact() {
                println!("{}", format!("  ✓ Compressed {} turns", summary.matches("Turn").count()).dimmed());
            }
        }

        // Create hook for Claude Code style tool display
        let hook = ToolDisplayHook::new();

        let project_path_buf = session.project_path.clone();
        // Select prompt based on query type (analysis vs generation)
        let preamble = get_system_prompt(&session.project_path, Some(&input));
        let is_generation = prompts::is_generation_query(&input);

        // Convert conversation history to Rig Message format
        let mut chat_history = conversation_history.to_messages();

        let response = match session.provider {
            ProviderType::OpenAI => {
                let client = openai::Client::from_env();
                // For GPT-5.x reasoning models, enable reasoning with summary output
                // so we can see the model's thinking process
                let reasoning_params = if session.model.starts_with("gpt-5") || session.model.starts_with("o1") {
                    Some(serde_json::json!({
                        "reasoning": {
                            "effort": "medium",
                            "summary": "detailed"
                        }
                    }))
                } else {
                    None
                };

                let mut builder = client
                    .agent(&session.model)
                    .preamble(&preamble)
                    .max_tokens(4096)
                    .tool(AnalyzeTool::new(project_path_buf.clone()))
                    .tool(SecurityScanTool::new(project_path_buf.clone()))
                    .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                    .tool(ReadFileTool::new(project_path_buf.clone()))
                    .tool(ListDirectoryTool::new(project_path_buf.clone()));

                // Add generation tools if this is a generation query
                if is_generation {
                    builder = builder
                        .tool(WriteFileTool::new(project_path_buf.clone()))
                        .tool(WriteFilesTool::new(project_path_buf.clone()))
                        .tool(ShellTool::new(project_path_buf.clone()));
                }

                if let Some(params) = reasoning_params {
                    builder = builder.additional_params(params);
                }

                let agent = builder.build();
                // Allow up to 50 tool call turns for complex generation tasks
                // Use hook to display tool calls as they happen
                // Pass conversation history for context continuity
                agent.prompt(&input)
                    .with_history(&mut chat_history)
                    .with_hook(hook.clone())
                    .multi_turn(50)
                    .await
            }
            ProviderType::Anthropic => {
                let client = anthropic::Client::from_env();
                let mut builder = client
                    .agent(&session.model)
                    .preamble(&preamble)
                    .max_tokens(4096)
                    .tool(AnalyzeTool::new(project_path_buf.clone()))
                    .tool(SecurityScanTool::new(project_path_buf.clone()))
                    .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                    .tool(ReadFileTool::new(project_path_buf.clone()))
                    .tool(ListDirectoryTool::new(project_path_buf.clone()));

                // Add generation tools if this is a generation query
                if is_generation {
                    builder = builder
                        .tool(WriteFileTool::new(project_path_buf.clone()))
                        .tool(WriteFilesTool::new(project_path_buf.clone()))
                        .tool(ShellTool::new(project_path_buf.clone()));
                }

                let agent = builder.build();

                // Allow up to 50 tool call turns for complex generation tasks
                // Use hook to display tool calls as they happen
                // Pass conversation history for context continuity
                agent.prompt(&input)
                    .with_history(&mut chat_history)
                    .with_hook(hook.clone())
                    .multi_turn(50)
                    .await
            }
        };

        match response {
            Ok(text) => {
                // Show final response
                println!();
                ResponseFormatter::print_response(&text);

                // Track token usage (estimate since Rig doesn't expose exact counts)
                let prompt_tokens = TokenUsage::estimate_tokens(&input);
                let completion_tokens = TokenUsage::estimate_tokens(&text);
                session.token_usage.add_request(prompt_tokens, completion_tokens);

                // Extract tool calls from the hook state for history tracking
                let tool_calls = extract_tool_calls_from_hook(&hook).await;

                // Add to conversation history with tool call records
                conversation_history.add_turn(input.clone(), text.clone(), tool_calls);

                // Also update legacy session history for compatibility
                session.history.push(("user".to_string(), input));
                session.history.push(("assistant".to_string(), text));
            }
            Err(e) => {
                let err_str = e.to_string();
                println!();
                // Check if this is a max depth error
                if err_str.contains("MaxDepth") || err_str.contains("max_depth") || err_str.contains("reached limit") {
                    eprintln!("{}", "Reached tool call limit (50 turns).".yellow());
                    eprintln!("{}", "Type 'continue' to resume, or ask a new question.".dimmed());
                } else {
                    eprintln!("{}", format!("Error: {}", e).red());
                }
            }
        }
        println!();
    }

    Ok(())
}

/// Extract tool call records from the hook state for history tracking
async fn extract_tool_calls_from_hook(hook: &ToolDisplayHook) -> Vec<ToolCallRecord> {
    let state = hook.state();
    let guard = state.lock().await;

    guard.tool_calls.iter().map(|tc| {
        ToolCallRecord {
            tool_name: tc.name.clone(),
            args_summary: truncate_string(&tc.args, 100),
            result_summary: tc.output.as_ref()
                .map(|o| truncate_string(o, 200))
                .unwrap_or_else(|| "completed".to_string()),
        }
    }).collect()
}

/// Helper to truncate strings for summaries.
///
/// Counts characters rather than bytes so the cut never lands inside a
/// multi-byte UTF-8 character (byte-index slicing would panic there).
fn truncate_string(s: &str, max_len: usize) -> String {
    if s.chars().count() <= max_len {
        s.to_string()
    } else {
        let truncated: String = s.chars().take(max_len.saturating_sub(3)).collect();
        format!("{}...", truncated)
    }
}
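
// A minimal sanity check for `truncate_string` (an illustrative sketch using
// the standard test harness): the short-input fast path, the ellipsis budget,
// and char-boundary safety on multi-byte UTF-8 input.
#[cfg(test)]
mod truncate_tests {
    use super::truncate_string;

    #[test]
    fn short_strings_pass_through() {
        assert_eq!(truncate_string("hello", 10), "hello");
    }

    #[test]
    fn long_strings_get_an_ellipsis_within_budget() {
        // 10 chars truncated to max_len 8: 5 chars + "..." = 8 chars total.
        assert_eq!(truncate_string("abcdefghij", 8), "abcde...");
    }

    #[test]
    fn multibyte_input_does_not_panic() {
        // Byte-index slicing could split 'é' or 'ö'; char-based truncation must not.
        let t = truncate_string("héllo wörld, héllo wörld", 10);
        assert!(t.ends_with("..."));
    }
}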

/// Run a single query and return the response
pub async fn run_query(
    project_path: &Path,
    query: &str,
    provider: ProviderType,
    model: Option<String>,
) -> AgentResult<String> {
    use tools::*;

    let project_path_buf = project_path.to_path_buf();
    // Select prompt based on query type (analysis vs generation)
    let preamble = get_system_prompt(project_path, Some(query));
    let is_generation = prompts::is_generation_query(query);

    match provider {
        ProviderType::OpenAI => {
            let client = openai::Client::from_env();
            let model_name = model.as_deref().unwrap_or("gpt-5.2");

            // For GPT-5.x reasoning models, enable reasoning with summary output
            let reasoning_params = if model_name.starts_with("gpt-5") || model_name.starts_with("o1") {
                Some(serde_json::json!({
                    "reasoning": {
                        "effort": "medium",
                        "summary": "detailed"
                    }
                }))
            } else {
                None
            };

            let mut builder = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()));

            // Add generation tools if this is a generation query
            if is_generation {
                builder = builder
                    .tool(WriteFileTool::new(project_path_buf.clone()))
                    .tool(WriteFilesTool::new(project_path_buf.clone()))
                    .tool(ShellTool::new(project_path_buf.clone()));
            }

            if let Some(params) = reasoning_params {
                builder = builder.additional_params(params);
            }

            let agent = builder.build();

            agent
                .prompt(query)
                .multi_turn(50)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
        ProviderType::Anthropic => {
            let client = anthropic::Client::from_env();
            let model_name = model.as_deref().unwrap_or("claude-sonnet-4-20250514");

            let mut builder = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()));

            // Add generation tools if this is a generation query
            if is_generation {
                builder = builder
                    .tool(WriteFileTool::new(project_path_buf.clone()))
                    .tool(WriteFilesTool::new(project_path_buf.clone()))
                    .tool(ShellTool::new(project_path_buf.clone()));
            }

            let agent = builder.build();

            agent
                .prompt(query)
                .multi_turn(50)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
    }
}