pub mod commands;
pub mod history;
pub mod ide;
pub mod prompts;
pub mod session;
pub mod tools;
pub mod ui;

use colored::Colorize;
use commands::TokenUsage;
use history::{ConversationHistory, ToolCallRecord};
use ide::IdeClient;
use rig::{
    client::{CompletionClient, ProviderClient},
    completion::Prompt,
    providers::{anthropic, openai},
};
use session::ChatSession;
use std::path::Path;
use std::sync::Arc;
use tokio::sync::Mutex as TokioMutex;
use ui::{ResponseFormatter, ToolDisplayHook};

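/// LLM providers the agent can talk to.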
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum ProviderType {
    #[default]
    OpenAI,
    Anthropic,
}

impl std::fmt::Display for ProviderType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ProviderType::OpenAI => write!(f, "openai"),
            ProviderType::Anthropic => write!(f, "anthropic"),
        }
    }
}

impl std::str::FromStr for ProviderType {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "openai" => Ok(ProviderType::OpenAI),
            "anthropic" => Ok(ProviderType::Anthropic),
            _ => Err(format!("Unknown provider: {}", s)),
        }
    }
}

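/// Errors surfaced to the CLI from the agent layer.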
#[derive(Debug, thiserror::Error)]
pub enum AgentError {
    #[error("Missing API key. Set {0} environment variable.")]
    MissingApiKey(String),

    #[error("Provider error: {0}")]
    ProviderError(String),

    #[error("Tool error: {0}")]
    ToolError(String),
}

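/// Convenience alias for results returned by this module.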
pub type AgentResult<T> = Result<T, AgentError>;

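/// Picks the system prompt: generation queries get the DevOps prompt,
/// everything else gets the analysis prompt.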
fn get_system_prompt(project_path: &Path, query: Option<&str>) -> String {
    if let Some(q) = query {
        if prompts::is_generation_query(q) {
            return prompts::get_devops_prompt(project_path);
        }
    }
    prompts::get_analysis_prompt(project_path)
}

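/// Runs the interactive chat loop: reads input, dispatches slash commands,
/// and drives the model with retry, continuation, and history compaction.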
pub async fn run_interactive(
    project_path: &Path,
    provider: ProviderType,
    model: Option<String>,
) -> AgentResult<()> {
    use tools::*;

    let mut session = ChatSession::new(project_path, provider, model);

    let mut conversation_history = ConversationHistory::new();

    let ide_client: Option<Arc<TokioMutex<IdeClient>>> = {
        let mut client = IdeClient::new().await;
        if client.is_ide_available() {
            match client.connect().await {
                Ok(()) => {
                    println!(
                        "{} Connected to {} IDE companion",
                        "✓".green(),
                        client.ide_name().unwrap_or("VS Code")
                    );
                    Some(Arc::new(TokioMutex::new(client)))
                }
                Err(e) => {
                    println!("{} IDE companion not connected: {}", "!".yellow(), e);
                    None
                }
            }
        } else {
            println!(
                "{} No IDE detected (TERM_PROGRAM={})",
                "·".dimmed(),
                std::env::var("TERM_PROGRAM").unwrap_or_default()
            );
            None
        }
    };

    ChatSession::load_api_key_to_env(session.provider);

    if !ChatSession::has_api_key(session.provider) {
        ChatSession::prompt_api_key(session.provider)?;
    }

    session.print_banner();

    loop {
        if !conversation_history.is_empty() {
            println!(
                "{}",
                format!("  💬 Context: {}", conversation_history.status()).dimmed()
            );
        }

        let input = match session.read_input() {
            Ok(input) => input,
            Err(_) => break,
        };

        if input.is_empty() {
            continue;
        }

        if ChatSession::is_command(&input) {
            let cmd = input.trim().to_lowercase();
            if cmd == "/clear" || cmd == "/c" {
                conversation_history.clear();
            }
            match session.process_command(&input) {
                Ok(true) => continue,
                Ok(false) => break,
                Err(e) => {
                    eprintln!("{}", format!("Error: {}", e).red());
                    continue;
                }
            }
        }

        if !ChatSession::has_api_key(session.provider) {
            eprintln!("{}", "No API key configured. Use /provider to set one.".yellow());
            continue;
        }

        if conversation_history.needs_compaction() {
            println!("{}", "  📦 Compacting conversation history...".dimmed());
            if let Some(summary) = conversation_history.compact() {
                println!(
                    "{}",
                    format!("  ✓ Compressed {} turns", summary.matches("Turn").count()).dimmed()
                );
            }
        }

        // Recovery budgets for this request: transient-error retries,
        // truncation continuations, a per-batch checkpoint (matches the
        // multi_turn depth used below), and a hard cap on total tool calls.
        const MAX_RETRIES: u32 = 3;
        const MAX_CONTINUATIONS: u32 = 10;
        const TOOL_CALL_CHECKPOINT: usize = 50;
        const MAX_TOOL_CALLS: usize = 300;
        let mut retry_attempt = 0;
        let mut continuation_count = 0;
        let mut total_tool_calls: usize = 0;
        let mut auto_continue_tools = false;
        let mut current_input = input.clone();
        let mut succeeded = false;

        while retry_attempt < MAX_RETRIES && continuation_count < MAX_CONTINUATIONS && !succeeded {
            if continuation_count > 0 {
                eprintln!("{}", "  📡 Sending continuation request...".dimmed());
            }

            let hook = ToolDisplayHook::new();

            let project_path_buf = session.project_path.clone();
            let preamble = get_system_prompt(&session.project_path, Some(&current_input));
            let is_generation = prompts::is_generation_query(&current_input);

            let mut chat_history = conversation_history.to_messages();

            let response = match session.provider {
                ProviderType::OpenAI => {
                    let client = openai::Client::from_env();
                    // Reasoning-capable models accept extra reasoning parameters.
                    let reasoning_params = if session.model.starts_with("gpt-5")
                        || session.model.starts_with("o1")
                    {
                        Some(serde_json::json!({
                            "reasoning": {
                                "effort": "medium",
                                "summary": "detailed"
                            }
                        }))
                    } else {
                        None
                    };

                    let mut builder = client
                        .agent(&session.model)
                        .preamble(&preamble)
                        .max_tokens(4096)
                        .tool(AnalyzeTool::new(project_path_buf.clone()))
                        .tool(SecurityScanTool::new(project_path_buf.clone()))
                        .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                        .tool(ReadFileTool::new(project_path_buf.clone()))
                        .tool(ListDirectoryTool::new(project_path_buf.clone()));

                    if is_generation {
                        // Route write tools through the IDE companion when connected.
                        let (write_file_tool, write_files_tool) = if let Some(ref client) = ide_client {
                            (
                                WriteFileTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                                WriteFilesTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                            )
                        } else {
                            (
                                WriteFileTool::new(project_path_buf.clone()),
                                WriteFilesTool::new(project_path_buf.clone()),
                            )
                        };
                        builder = builder
                            .tool(write_file_tool)
                            .tool(write_files_tool)
                            .tool(ShellTool::new(project_path_buf.clone()));
                    }

                    if let Some(params) = reasoning_params {
                        builder = builder.additional_params(params);
                    }

                    let agent = builder.build();
                    agent
                        .prompt(&current_input)
                        .with_history(&mut chat_history)
                        .with_hook(hook.clone())
                        .multi_turn(50)
                        .await
                }
                ProviderType::Anthropic => {
                    let client = anthropic::Client::from_env();
                    let mut builder = client
                        .agent(&session.model)
                        .preamble(&preamble)
                        .max_tokens(4096)
                        .tool(AnalyzeTool::new(project_path_buf.clone()))
                        .tool(SecurityScanTool::new(project_path_buf.clone()))
                        .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                        .tool(ReadFileTool::new(project_path_buf.clone()))
                        .tool(ListDirectoryTool::new(project_path_buf.clone()));

                    if is_generation {
                        let (write_file_tool, write_files_tool) = if let Some(ref client) = ide_client {
                            (
                                WriteFileTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                                WriteFilesTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                            )
                        } else {
                            (
                                WriteFileTool::new(project_path_buf.clone()),
                                WriteFilesTool::new(project_path_buf.clone()),
                            )
                        };
                        builder = builder
                            .tool(write_file_tool)
                            .tool(write_files_tool)
                            .tool(ShellTool::new(project_path_buf.clone()));
                    }

                    let agent = builder.build();

                    agent
                        .prompt(&current_input)
                        .with_history(&mut chat_history)
                        .with_hook(hook.clone())
                        .multi_turn(50)
                        .await
                }
            };

            match response {
                Ok(text) => {
                    println!();
                    ResponseFormatter::print_response(&text);

                    let prompt_tokens = TokenUsage::estimate_tokens(&input);
                    let completion_tokens = TokenUsage::estimate_tokens(&text);
                    session.token_usage.add_request(prompt_tokens, completion_tokens);

                    let tool_calls = extract_tool_calls_from_hook(&hook).await;
                    let batch_tool_count = tool_calls.len();
                    total_tool_calls += batch_tool_count;

                    if batch_tool_count > 10 {
                        println!(
                            "{}",
                            format!(
                                "  ✓ Completed with {} tool calls ({} total this session)",
                                batch_tool_count, total_tool_calls
                            )
                            .dimmed()
                        );
                    }

                    conversation_history.add_turn(input.clone(), text.clone(), tool_calls);

                    if conversation_history.needs_compaction() {
                        println!("{}", "  📦 Compacting conversation history...".dimmed());
                        if let Some(summary) = conversation_history.compact() {
                            println!(
                                "{}",
                                format!("  ✓ Compressed {} turns", summary.matches("Turn").count())
                                    .dimmed()
                            );
                        }
                    }

                    session.history.push(("user".to_string(), input.clone()));
                    session.history.push(("assistant".to_string(), text));
                    succeeded = true;
                }
                Err(e) => {
                    let err_str = e.to_string();

                    println!();

                    // The agent hit the per-batch multi-turn depth limit.
                    if err_str.contains("MaxDepth")
                        || err_str.contains("max_depth")
                        || err_str.contains("reached limit")
                    {
                        let completed_tools = extract_tool_calls_from_hook(&hook).await;
                        let agent_thinking = extract_agent_messages_from_hook(&hook).await;
                        let batch_tool_count = completed_tools.len();
                        total_tool_calls += batch_tool_count;

                        eprintln!(
                            "{}",
                            format!(
                                "⚠ Reached {} tool calls this batch ({} total). Maximum allowed: {}",
                                batch_tool_count, total_tool_calls, MAX_TOOL_CALLS
                            )
                            .yellow()
                        );

                        if total_tool_calls >= MAX_TOOL_CALLS {
                            eprintln!("{}", format!("Maximum tool call limit ({}) reached.", MAX_TOOL_CALLS).red());
                            eprintln!("{}", "The task is too complex. Try breaking it into smaller parts.".dimmed());
                            break;
                        }

                        let should_continue = if auto_continue_tools {
                            eprintln!("{}", "  Auto-continuing (you selected 'always')...".dimmed());
                            true
                        } else {
                            eprintln!("{}", "A large number of tool calls have been used. Continue?".yellow());
                            eprintln!("{}", "  [y] Yes, continue  [n] No, stop  [a] Always continue".dimmed());
                            print!("  > ");
                            let _ = std::io::Write::flush(&mut std::io::stdout());

                            let mut response = String::new();
                            match std::io::stdin().read_line(&mut response) {
                                Ok(_) => {
                                    let resp = response.trim().to_lowercase();
                                    if resp == "a" || resp == "always" {
                                        auto_continue_tools = true;
                                        true
                                    } else {
                                        resp == "y" || resp == "yes" || resp.is_empty()
                                    }
                                }
                                Err(_) => false,
                            }
                        };

                        if !should_continue {
                            eprintln!("{}", "Stopped by user. Type 'continue' to resume later.".dimmed());
                            if !completed_tools.is_empty() {
                                conversation_history.add_turn(
                                    current_input.clone(),
                                    format!("[Stopped at checkpoint - {} tools completed]", batch_tool_count),
                                    vec![],
                                );
                            }
                            break;
                        }

                        eprintln!(
                            "{}",
                            format!(
                                "  → Continuing... {} remaining tool calls available",
                                MAX_TOOL_CALLS - total_tool_calls
                            )
                            .dimmed()
                        );

                        conversation_history.add_turn(
                            current_input.clone(),
                            format!("[Checkpoint - {} tools completed, continuing...]", batch_tool_count),
                            vec![],
                        );

                        current_input = build_continuation_prompt(&input, &completed_tools, &agent_thinking);

                        tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
                        continue;
                    } else if err_str.contains("rate") || err_str.contains("Rate") || err_str.contains("429") {
                        eprintln!("{}", "⚠ Rate limited by API provider.".yellow());
                        retry_attempt += 1;
                        eprintln!("{}", format!("  Waiting 5 seconds before retry ({}/{})...", retry_attempt, MAX_RETRIES).dimmed());
                        tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
                    } else if is_truncation_error(&err_str) {
                        // The model's output was cut off mid-response; salvage what
                        // completed and auto-continue from a checkpoint.
                        let completed_tools = extract_tool_calls_from_hook(&hook).await;
                        let agent_thinking = extract_agent_messages_from_hook(&hook).await;

                        let completed_count = completed_tools.iter()
                            .filter(|t| !t.result_summary.contains("IN PROGRESS"))
                            .count();
                        let in_progress_count = completed_tools.len() - completed_count;

                        if !completed_tools.is_empty() && continuation_count < MAX_CONTINUATIONS {
                            continuation_count += 1;
                            let status_msg = if in_progress_count > 0 {
                                format!(
                                    "⚠ Response truncated. {} completed, {} in-progress. Auto-continuing ({}/{})...",
                                    completed_count, in_progress_count, continuation_count, MAX_CONTINUATIONS
                                )
                            } else {
                                format!(
                                    "⚠ Response truncated. {} tool calls completed. Auto-continuing ({}/{})...",
                                    completed_count, continuation_count, MAX_CONTINUATIONS
                                )
                            };
                            eprintln!("{}", status_msg.yellow());

                            conversation_history.add_turn(
                                current_input.clone(),
                                format!(
                                    "[Partial response - {} tools completed, {} in-progress before truncation. See continuation prompt for details.]",
                                    completed_count, in_progress_count
                                ),
                                vec![],
                            );

                            if conversation_history.needs_compaction() {
                                eprintln!("{}", "  📦 Compacting history before continuation...".dimmed());
                                if let Some(summary) = conversation_history.compact() {
                                    eprintln!("{}", format!("  ✓ Compressed {} turns", summary.matches("Turn").count()).dimmed());
                                }
                            }

                            current_input = build_continuation_prompt(&input, &completed_tools, &agent_thinking);

                            eprintln!(
                                "{}",
                                format!(
                                    "  → Continuing with {} files read, {} written, {} other actions tracked",
                                    completed_tools.iter().filter(|t| t.tool_name == "read_file").count(),
                                    completed_tools.iter().filter(|t| t.tool_name == "write_file" || t.tool_name == "write_files").count(),
                                    completed_tools.iter().filter(|t| t.tool_name != "read_file" && t.tool_name != "write_file" && t.tool_name != "write_files" && t.tool_name != "list_directory").count()
                                )
                                .dimmed()
                            );

                            tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
                        } else if retry_attempt < MAX_RETRIES {
                            retry_attempt += 1;
                            eprintln!("{}", format!("⚠ Response error (attempt {}/{}). Retrying...", retry_attempt, MAX_RETRIES).yellow());
                            tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
                        } else {
                            eprintln!("{}", format!("Error: {}", e).red());
                            if continuation_count >= MAX_CONTINUATIONS {
                                eprintln!("{}", format!("Max continuations ({}) reached. The task is too complex for one request.", MAX_CONTINUATIONS).dimmed());
                            } else {
                                eprintln!("{}", "Max retries reached. The response may be too complex.".dimmed());
                            }
                            eprintln!("{}", "Try breaking your request into smaller parts.".dimmed());
                            break;
                        }
557 } else if err_str.contains("timeout") || err_str.contains("Timeout") {
558 retry_attempt += 1;
560 if retry_attempt < MAX_RETRIES {
561 eprintln!("{}", format!("โ Request timed out (attempt {}/{}). Retrying...", retry_attempt, MAX_RETRIES).yellow());
562 tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
563 } else {
564 eprintln!("{}", "Request timed out. Please try again.".red());
565 break;
566 }
                    } else {
                        eprintln!("{}", format!("Error: {}", e).red());
                        if continuation_count > 0 {
                            eprintln!("{}", format!("  (occurred during continuation attempt {})", continuation_count).dimmed());
                        }
                        eprintln!("{}", "Error details for debugging:".dimmed());
                        eprintln!("{}", format!("  - retry_attempt: {}/{}", retry_attempt, MAX_RETRIES).dimmed());
                        eprintln!("{}", format!("  - continuation_count: {}/{}", continuation_count, MAX_CONTINUATIONS).dimmed());
                        break;
                    }
                }
            }
        }
        println!();
    }

    Ok(())
}

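/// Snapshots the hook's recorded tool calls into history records, truncating
/// arguments and output for compact storage.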
async fn extract_tool_calls_from_hook(hook: &ToolDisplayHook) -> Vec<ToolCallRecord> {
    let state = hook.state();
    let guard = state.lock().await;

    guard.tool_calls.iter().enumerate().map(|(i, tc)| {
        let result = if tc.is_running {
            "[IN PROGRESS - may need to be re-run]".to_string()
        } else if let Some(output) = &tc.output {
            truncate_string(output, 200)
        } else {
            "completed".to_string()
        };

        ToolCallRecord {
            tool_name: tc.name.clone(),
            args_summary: truncate_string(&tc.args, 100),
            result_summary: result,
            tool_id: Some(format!("tool_{}_{}", tc.name, i)),
        }
    }).collect()
}

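/// Returns the assistant-side messages captured by the hook so far.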
async fn extract_agent_messages_from_hook(hook: &ToolDisplayHook) -> Vec<String> {
    let state = hook.state();
    let guard = state.lock().await;
    guard.agent_messages.clone()
}

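/// Truncates a string for display, appending "..." when cut and never
/// slicing in the middle of a multi-byte character.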
fn truncate_string(s: &str, max_len: usize) -> String {
    if s.len() <= max_len {
        s.to_string()
    } else {
        // Back up to a char boundary so slicing multi-byte UTF-8 cannot panic.
        let mut end = max_len.saturating_sub(3);
        while end > 0 && !s.is_char_boundary(end) {
            end -= 1;
        }
        format!("{}...", &s[..end])
    }
}

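/// Heuristic: JSON parse failures from the provider usually indicate a
/// response that was cut off mid-stream.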
fn is_truncation_error(err_str: &str) -> bool {
    err_str.contains("JsonError")
        || err_str.contains("EOF while parsing")
        || err_str.contains("JSON")
        || err_str.contains("unexpected end")
}

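/// Builds a follow-up prompt that tells the model what already happened
/// (files read/written, directories listed, interrupted calls) so it can
/// resume without repeating completed work.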
fn build_continuation_prompt(
    original_task: &str,
    completed_tools: &[ToolCallRecord],
    agent_thinking: &[String],
) -> String {
    use std::collections::HashSet;

    let mut files_read: HashSet<String> = HashSet::new();
    let mut files_written: HashSet<String> = HashSet::new();
    let mut dirs_listed: HashSet<String> = HashSet::new();
    let mut other_tools: Vec<String> = Vec::new();
    let mut in_progress: Vec<String> = Vec::new();

    for tool in completed_tools {
        let is_in_progress = tool.result_summary.contains("IN PROGRESS");

        if is_in_progress {
            in_progress.push(format!("{}({})", tool.tool_name, tool.args_summary));
            continue;
        }

        match tool.tool_name.as_str() {
            "read_file" => {
                files_read.insert(tool.args_summary.clone());
            }
            "write_file" | "write_files" => {
                files_written.insert(tool.args_summary.clone());
            }
            "list_directory" => {
                dirs_listed.insert(tool.args_summary.clone());
            }
            _ => {
                other_tools.push(format!("{}({})", tool.tool_name, truncate_string(&tool.args_summary, 40)));
            }
        }
    }

    let mut prompt = format!(
        "[CONTINUE] Your previous response was interrupted. DO NOT repeat completed work.\n\n\
         Original task: {}\n",
        truncate_string(original_task, 500)
    );

    if !files_read.is_empty() {
        prompt.push_str("\n== FILES ALREADY READ (do NOT read again) ==\n");
        for file in &files_read {
            prompt.push_str(&format!("  - {}\n", file));
        }
    }

    if !dirs_listed.is_empty() {
        prompt.push_str("\n== DIRECTORIES ALREADY LISTED ==\n");
        for dir in &dirs_listed {
            prompt.push_str(&format!("  - {}\n", dir));
        }
    }

    if !files_written.is_empty() {
        prompt.push_str("\n== FILES ALREADY WRITTEN ==\n");
        for file in &files_written {
            prompt.push_str(&format!("  - {}\n", file));
        }
    }

    if !other_tools.is_empty() {
        prompt.push_str("\n== OTHER COMPLETED ACTIONS ==\n");
        for tool in other_tools.iter().take(20) {
            prompt.push_str(&format!("  - {}\n", tool));
        }
        if other_tools.len() > 20 {
            prompt.push_str(&format!("  ... and {} more\n", other_tools.len() - 20));
        }
    }

    if !in_progress.is_empty() {
        prompt.push_str("\n== INTERRUPTED (may need re-run) ==\n");
        for tool in &in_progress {
            prompt.push_str(&format!("  ⚠ {}\n", tool));
        }
    }

    if let Some(last_thought) = agent_thinking.last() {
        prompt.push_str(&format!(
            "\n== YOUR LAST THOUGHTS ==\n\"{}\"\n",
            truncate_string(last_thought, 300)
        ));
    }

    prompt.push_str("\n== INSTRUCTIONS ==\n");
    prompt.push_str("IMPORTANT: Your previous response was too long and got cut off.\n");
    prompt.push_str("1. Do NOT re-read files listed above - they are already in context.\n");
    prompt.push_str("2. If writing a document, write it in SECTIONS - complete one section now, then continue.\n");
    prompt.push_str("3. Keep your response SHORT and focused. Better to complete small chunks than fail on large ones.\n");
    prompt.push_str("4. If the task involves writing a file, START WRITING NOW - don't explain what you'll do.\n");

    prompt
}

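/// Runs a single non-interactive query against the selected provider and
/// returns the model's final answer.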
pub async fn run_query(
    project_path: &Path,
    query: &str,
    provider: ProviderType,
    model: Option<String>,
) -> AgentResult<String> {
    use tools::*;

    let project_path_buf = project_path.to_path_buf();
    let preamble = get_system_prompt(project_path, Some(query));
    let is_generation = prompts::is_generation_query(query);

    match provider {
        ProviderType::OpenAI => {
            let client = openai::Client::from_env();
            let model_name = model.as_deref().unwrap_or("gpt-5.2");

            let reasoning_params = if model_name.starts_with("gpt-5") || model_name.starts_with("o1") {
                Some(serde_json::json!({
                    "reasoning": {
                        "effort": "medium",
                        "summary": "detailed"
                    }
                }))
            } else {
                None
            };

            let mut builder = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()));

            if is_generation {
                builder = builder
                    .tool(WriteFileTool::new(project_path_buf.clone()))
                    .tool(WriteFilesTool::new(project_path_buf.clone()))
                    .tool(ShellTool::new(project_path_buf.clone()));
            }

            if let Some(params) = reasoning_params {
                builder = builder.additional_params(params);
            }

            let agent = builder.build();

            agent
                .prompt(query)
                .multi_turn(50)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
        ProviderType::Anthropic => {
            let client = anthropic::Client::from_env();
            let model_name = model.as_deref().unwrap_or("claude-sonnet-4-20250514");

            let mut builder = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()));

            if is_generation {
                builder = builder
                    .tool(WriteFileTool::new(project_path_buf.clone()))
                    .tool(WriteFilesTool::new(project_path_buf.clone()))
                    .tool(ShellTool::new(project_path_buf.clone()));
            }

            let agent = builder.build();

            agent
                .prompt(query)
                .multi_turn(50)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
    }
}