syncable_cli/agent/mod.rs

//! Agent module for interactive AI-powered CLI assistance
//!
//! This module provides an agent layer, built on the Rig library, that lets
//! users interact with the CLI through natural language conversations.
//!
//! # Usage
//!
//! ```bash
//! # Interactive mode
//! sync-ctl chat
//!
//! # With a specific provider
//! sync-ctl chat --provider openai --model gpt-4o
//!
//! # Single query
//! sync-ctl chat -q "What security issues does this project have?"
//! ```
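//!
//! Provider clients are created via `from_env`, so the matching API key
//! (typically `OPENAI_API_KEY` or `ANTHROPIC_API_KEY`) must be exported
//! before starting a chat.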

pub mod config;
pub mod tools;
pub mod ui;

use futures::StreamExt;
use rig::{
    agent::MultiTurnStreamItem,
    client::{CompletionClient, ProviderClient},
    completion::{Message, Prompt},
    providers::{anthropic, openai},
    streaming::{StreamedAssistantContent, StreamingChat},
};
use std::io::{self, BufRead, Write};
use std::path::Path;

/// Provider type for the agent
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum ProviderType {
    #[default]
    OpenAI,
    Anthropic,
}

impl std::fmt::Display for ProviderType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ProviderType::OpenAI => write!(f, "openai"),
            ProviderType::Anthropic => write!(f, "anthropic"),
        }
    }
}

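/// Parsing is case-insensitive, so `--provider OpenAI` and `--provider openai`
/// resolve to the same variant. A minimal sketch:
///
/// ```ignore
/// use std::str::FromStr;
/// assert_eq!(ProviderType::from_str("OpenAI"), Ok(ProviderType::OpenAI));
/// assert!(ProviderType::from_str("gemini").is_err());
/// ```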
impl std::str::FromStr for ProviderType {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "openai" => Ok(ProviderType::OpenAI),
            "anthropic" => Ok(ProviderType::Anthropic),
            _ => Err(format!("Unknown provider: {}", s)),
        }
    }
}

/// Error types for the agent
#[derive(Debug, thiserror::Error)]
pub enum AgentError {
    #[error("Missing API key. Set the {0} environment variable.")]
    MissingApiKey(String),

    #[error("Provider error: {0}")]
    ProviderError(String),

    #[error("Tool error: {0}")]
    ToolError(String),

    #[error("Client initialization error: {0}")]
    ClientError(String),
}

pub type AgentResult<T> = Result<T, AgentError>;

/// Get the system prompt for the agent
fn get_system_prompt(project_path: &Path) -> String {
    format!(
        r#"You are an expert AI coding assistant integrated into the Syncable CLI. You help developers understand, navigate, and improve their codebases through deep, thorough investigation.

## Project Context
Project location: {}

## Your Tools

### 🏗️ MONOREPO DISCOVERY (USE FIRST!)
- **discover_services** - **START HERE for monorepos!** Lists ALL services/packages with their:
  - Names, paths, types (Next.js, Express, Rust binary, etc.)
  - Frameworks detected (React, Prisma, tRPC, etc.)
  - Workspace configuration
  - Use `path: "apps"` or `path: "services"` to focus on specific areas

### 🔍 DEEP ANALYSIS
- **analyze_project** - Comprehensive analysis of a specific project
  - **ALWAYS specify `path`** to analyze individual services: `path: "apps/api"`
  - `mode: "json"` - Structured data (default, best for parsing)
  - `mode: "detailed"` - Full analysis with Docker info
  - **For monorepos: Call this MULTIPLE TIMES with different paths!**

### 🔎 CODE SEARCH
- **search_code** - Grep-like search across files
  - `pattern: "function_name"` - Find where things are defined/used
  - `path: "apps/api"` - Search within a specific service
  - `regex: true` - Enable regex patterns
  - `extension: "ts"` - Filter by file type
  - `max_results: 100` - Increase for a thorough search

- **find_files** - Find files by name/pattern
  - `pattern: "*.config.*"` - Find all config files
  - `pattern: "Dockerfile*"` - Find Dockerfiles
  - `include_dirs: true` - Include directories

- **read_file** - Read actual file contents
  - Use after finding files to see implementation details
  - `start_line`/`end_line` - Read specific sections

- **list_directory** - Explore directory structure
  - `recursive: true` - See nested structure

### 🛡️ SECURITY
- **security_scan** - Find secrets, hardcoded credentials, security issues
- **check_vulnerabilities** - Check dependencies for known CVEs

### 📦 GENERATION
- **generate_iac** - Generate Infrastructure as Code
  - `path: "apps/api"` - Generate for a specific service
  - `generate_type: "dockerfile" | "compose" | "terraform" | "all"`

## AGENTIC INVESTIGATION PROTOCOL

You are a DEEPLY INVESTIGATIVE agent. You have up to 300 tool calls - USE THEM!

### For Monorepos (multiple services/packages):
1. **ALWAYS start with `discover_services`** to map the entire structure
2. **Analyze EACH relevant service individually** with `analyze_project(path: "service/path")`
3. **Search across the monorepo** for patterns, shared code, cross-service dependencies
4. **Read key files** in each service (entry points, configs, main logic)
5. **Cross-reference** - how do services communicate? What's shared?

### For Deep Investigation:
1. **Don't stop at surface level** - dig into implementation
2. **Follow the code** - if you find a function call, search for its definition
3. **Check configs** - look for .env files, config directories, environment setup
4. **Examine dependencies** - package.json, Cargo.toml, what's being used?
5. **Read actual source code** - use read_file to understand implementation

### Investigation Mindset:
- "I found 5 services, let me analyze each one..."
- "The API uses Express, let me find the route definitions..."
- "This imports from ../shared, let me explore that directory..."
- "There's a database connection, let me find the schema..."
- "I see tRPC, let me find the router definitions..."

## Response Guidelines
- NEVER answer without thorough investigation first
- Show your exploration: "Discovering services... Found 5 apps. Analyzing apps/api..."
- For each service: summarize its purpose, tech stack, key files
- When asked to investigate: USE MANY TOOLS, explore deeply
- Format code with ```language blocks
- Be specific: "In apps/api/src/routes/users.ts line 45..."
- Don't guess - if you're uncertain, explore more!"#,
        project_path.display()
    )
}

/// Run the agent in interactive mode with a streaming terminal UI
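///
/// The loop runs until the user types `exit` or `quit`; assistant output is
/// streamed and tool calls are surfaced through [`ui::AgentUI`]. A hedged
/// sketch of embedding it (assuming the crate is exposed as `syncable_cli`
/// and an async runtime is already running):
///
/// ```ignore
/// use syncable_cli::agent::{run_interactive, ProviderType};
///
/// run_interactive(std::path::Path::new("."), ProviderType::Anthropic, None).await?;
/// ```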
pub async fn run_interactive(
    project_path: &Path,
    provider: ProviderType,
    model: Option<String>,
) -> AgentResult<()> {
    use tools::*;
    use ui::AgentUI;

    let project_path_buf = project_path.to_path_buf();
    let preamble = get_system_prompt(project_path);
    let mut ui = AgentUI::new();
    let mut chat_history: Vec<Message> = Vec::new();

    let provider_name = match provider {
        ProviderType::OpenAI => "OpenAI",
        ProviderType::Anthropic => "Anthropic",
    };

    match provider {
        ProviderType::OpenAI => {
            let client = openai::Client::from_env();
            let model_name = model.as_deref().unwrap_or("gpt-4o");

            let agent = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(DiscoverServicesTool::new(project_path_buf.clone()))
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()))
                .tool(SearchCodeTool::new(project_path_buf.clone()))
                .tool(FindFilesTool::new(project_path_buf.clone()))
                .tool(GenerateIaCTool::new(project_path_buf.clone()))
                .build();

            ui.print_welcome(provider_name, model_name);

            // Custom chat loop with streaming
            loop {
                ui.print_prompt();
                io::stdout().flush().ok();

                let mut input = String::new();
                if io::stdin().lock().read_line(&mut input).is_err() {
                    break;
                }

                let input = input.trim();
                if input.is_empty() {
                    continue;
                }
                if input.eq_ignore_ascii_case("exit") || input.eq_ignore_ascii_case("quit") {
                    println!("\n  {} Goodbye!\n", ui::SPARKLES);
                    break;
                }

                ui.start_thinking();

                // Use streaming chat with multi-turn enabled for tool calls
                let mut stream = agent
                    .stream_chat(input, chat_history.clone())
                    .multi_turn(300)
                    .await;
                ui.stop_thinking();
                ui.print_assistant_header();
                ui.start_streaming();

                let mut full_response = String::new();
                let mut had_tool_calls = false;
                let mut last_update = 0;

                while let Some(chunk) = stream.next().await {
                    match chunk {
                        Ok(MultiTurnStreamItem::StreamAssistantItem(StreamedAssistantContent::Text(text))) => {
                            full_response.push_str(&text.text);
                            // Update progress every 50 chars
                            if full_response.len() - last_update > 50 {
                                ui.update_streaming(full_response.len());
                                last_update = full_response.len();
                            }
                        }
                        Ok(MultiTurnStreamItem::StreamAssistantItem(StreamedAssistantContent::ToolCall(tool_call))) => {
                            had_tool_calls = true;
                            ui.pause_spinner();
                            ui.print_tool_call_notification(&tool_call.function.name);
                            ui.print_tool_call_complete(&tool_call.function.name);
                            ui.start_streaming();
                        }
                        Ok(MultiTurnStreamItem::StreamAssistantItem(_)) => {}
                        Ok(MultiTurnStreamItem::StreamUserItem(_)) => {}
                        Ok(MultiTurnStreamItem::FinalResponse(_)) => {}
                        Err(e) => {
                            ui.print_error(&format!("Stream error: {}", e));
                            break;
                        }
                        _ => {}
                    }
                }

                // Render the complete response with markdown
                ui.finish_streaming_and_render(&full_response);

                // Update chat history
                if !full_response.is_empty() || had_tool_calls {
                    chat_history.push(Message::user(input));
                    chat_history.push(Message::assistant(&full_response));
                }
            }
        }
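        // This branch mirrors the OpenAI one; each provider yields an agent
        // with a different concrete model type, so the chat loop is repeated.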
        ProviderType::Anthropic => {
            let client = anthropic::Client::from_env();
            let model_name = model.as_deref().unwrap_or("claude-3-5-sonnet-latest");

            let agent = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(DiscoverServicesTool::new(project_path_buf.clone()))
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()))
                .tool(SearchCodeTool::new(project_path_buf.clone()))
                .tool(FindFilesTool::new(project_path_buf.clone()))
                .tool(GenerateIaCTool::new(project_path_buf.clone()))
                .build();

            ui.print_welcome(provider_name, model_name);

            // Custom chat loop with streaming
            loop {
                ui.print_prompt();
                io::stdout().flush().ok();

                let mut input = String::new();
                if io::stdin().lock().read_line(&mut input).is_err() {
                    break;
                }

                let input = input.trim();
                if input.is_empty() {
                    continue;
                }
                if input.eq_ignore_ascii_case("exit") || input.eq_ignore_ascii_case("quit") {
                    println!("\n  {} Goodbye!\n", ui::SPARKLES);
                    break;
                }

                ui.start_thinking();

                // Use streaming chat with multi-turn enabled for tool calls
                let mut stream = agent
                    .stream_chat(input, chat_history.clone())
                    .multi_turn(300)
                    .await;
                ui.stop_thinking();
                ui.print_assistant_header();
                ui.start_streaming();

                let mut full_response = String::new();
                let mut had_tool_calls = false;
                let mut last_update = 0;

                while let Some(chunk) = stream.next().await {
                    match chunk {
                        Ok(MultiTurnStreamItem::StreamAssistantItem(StreamedAssistantContent::Text(text))) => {
                            full_response.push_str(&text.text);
                            // Update progress every 50 chars
                            if full_response.len() - last_update > 50 {
                                ui.update_streaming(full_response.len());
                                last_update = full_response.len();
                            }
                        }
                        Ok(MultiTurnStreamItem::StreamAssistantItem(StreamedAssistantContent::ToolCall(tool_call))) => {
                            had_tool_calls = true;
                            ui.pause_spinner();
                            ui.print_tool_call_notification(&tool_call.function.name);
                            ui.print_tool_call_complete(&tool_call.function.name);
                            ui.start_streaming();
                        }
                        Ok(MultiTurnStreamItem::StreamAssistantItem(_)) => {}
                        Ok(MultiTurnStreamItem::StreamUserItem(_)) => {}
                        Ok(MultiTurnStreamItem::FinalResponse(_)) => {}
                        Err(e) => {
                            ui.print_error(&format!("Stream error: {}", e));
                            break;
                        }
                        _ => {}
                    }
                }

                // Render the complete response with markdown
                ui.finish_streaming_and_render(&full_response);

                // Update chat history
                if !full_response.is_empty() || had_tool_calls {
                    chat_history.push(Message::user(input));
                    chat_history.push(Message::assistant(&full_response));
                }
            }
        }
    }

    Ok(())
}

/// Run a single query and return the response
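///
/// This backs `sync-ctl chat -q "..."`. A minimal sketch of calling it
/// directly (assuming the crate is exposed as `syncable_cli` and an async
/// runtime is already running):
///
/// ```ignore
/// use syncable_cli::agent::{run_query, ProviderType};
///
/// let answer = run_query(
///     std::path::Path::new("."),
///     "What security issues does this project have?",
///     ProviderType::OpenAI,
///     None, // use the provider's default model
/// )
/// .await?;
/// println!("{answer}");
/// ```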
pub async fn run_query(
    project_path: &Path,
    query: &str,
    provider: ProviderType,
    model: Option<String>,
) -> AgentResult<String> {
    use tools::*;

    let project_path_buf = project_path.to_path_buf();
    let preamble = get_system_prompt(project_path);

    match provider {
        ProviderType::OpenAI => {
            let client = openai::Client::from_env();
            let model_name = model.as_deref().unwrap_or("gpt-4o");

            let agent = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(DiscoverServicesTool::new(project_path_buf.clone()))
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()))
                .tool(SearchCodeTool::new(project_path_buf.clone()))
                .tool(FindFilesTool::new(project_path_buf.clone()))
                .tool(GenerateIaCTool::new(project_path_buf))
                .build();

            agent
                .prompt(query)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
        ProviderType::Anthropic => {
            let client = anthropic::Client::from_env();
            let model_name = model.as_deref().unwrap_or("claude-3-5-sonnet-latest");

            let agent = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(DiscoverServicesTool::new(project_path_buf.clone()))
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()))
                .tool(SearchCodeTool::new(project_path_buf.clone()))
                .tool(FindFilesTool::new(project_path_buf.clone()))
                .tool(GenerateIaCTool::new(project_path_buf))
                .build();

            agent
                .prompt(query)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
    }
}
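
#[cfg(test)]
mod tests {
    // A light sketch of unit tests for the pure helpers above; the
    // provider-backed entry points need API keys and are not covered here.
    use super::*;
    use std::str::FromStr;

    #[test]
    fn provider_type_parses_case_insensitively() {
        assert_eq!(ProviderType::from_str("OpenAI"), Ok(ProviderType::OpenAI));
        assert_eq!(ProviderType::from_str("anthropic"), Ok(ProviderType::Anthropic));
        assert!(ProviderType::from_str("gemini").is_err());
    }

    #[test]
    fn provider_type_display_round_trips() {
        for provider in [ProviderType::OpenAI, ProviderType::Anthropic] {
            assert_eq!(ProviderType::from_str(&provider.to_string()), Ok(provider));
        }
    }
}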