1pub mod commands;
34pub mod history;
35pub mod ide;
36pub mod prompts;
37pub mod session;
38pub mod tools;
39pub mod ui;
40
41use colored::Colorize;
42use history::{ConversationHistory, ToolCallRecord};
43use ide::IdeClient;
44use rig::{
45 client::{CompletionClient, ProviderClient},
46 completion::Prompt,
47 providers::{anthropic, openai},
48};
49use session::ChatSession;
50use commands::TokenUsage;
51use std::path::Path;
52use std::sync::Arc;
53use tokio::sync::Mutex as TokioMutex;
54use ui::{ResponseFormatter, ToolDisplayHook};
55
/// Which LLM backend to talk to. Defaults to OpenAI.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum ProviderType {
    #[default]
    OpenAI,
    Anthropic,
}
63
64impl std::fmt::Display for ProviderType {
65 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
66 match self {
67 ProviderType::OpenAI => write!(f, "openai"),
68 ProviderType::Anthropic => write!(f, "anthropic"),
69 }
70 }
71}
72
73impl std::str::FromStr for ProviderType {
74 type Err = String;
75
76 fn from_str(s: &str) -> Result<Self, Self::Err> {
77 match s.to_lowercase().as_str() {
78 "openai" => Ok(ProviderType::OpenAI),
79 "anthropic" => Ok(ProviderType::Anthropic),
80 _ => Err(format!("Unknown provider: {}", s)),
81 }
82 }
83}
84
/// Errors surfaced by this agent module.
#[derive(Debug, thiserror::Error)]
pub enum AgentError {
    /// No API key configured; payload names the environment variable to set.
    #[error("Missing API key. Set {0} environment variable.")]
    MissingApiKey(String),

    /// The LLM provider call failed; payload is the stringified upstream error.
    #[error("Provider error: {0}")]
    ProviderError(String),

    /// A tool invocation failed.
    #[error("Tool error: {0}")]
    ToolError(String),
}
97
/// Convenience alias for results produced by this module.
pub type AgentResult<T> = Result<T, AgentError>;
99
100fn get_system_prompt(project_path: &Path, query: Option<&str>) -> String {
102 if let Some(q) = query {
103 if prompts::is_code_development_query(q) {
105 return prompts::get_code_development_prompt(project_path);
106 }
107 if prompts::is_generation_query(q) {
109 return prompts::get_devops_prompt(project_path);
110 }
111 }
112 prompts::get_analysis_prompt(project_path)
114}
115
116pub async fn run_interactive(
118 project_path: &Path,
119 provider: ProviderType,
120 model: Option<String>,
121) -> AgentResult<()> {
122 use tools::*;
123
124 let mut session = ChatSession::new(project_path, provider, model);
125
126 let mut conversation_history = ConversationHistory::new();
128
129 let ide_client: Option<Arc<TokioMutex<IdeClient>>> = {
131 let mut client = IdeClient::new().await;
132 if client.is_ide_available() {
133 match client.connect().await {
134 Ok(()) => {
135 println!(
136 "{} Connected to {} IDE companion",
137 "โ".green(),
138 client.ide_name().unwrap_or("VS Code")
139 );
140 Some(Arc::new(TokioMutex::new(client)))
141 }
142 Err(e) => {
143 println!(
145 "{} IDE companion not connected: {}",
146 "!".yellow(),
147 e
148 );
149 None
150 }
151 }
152 } else {
153 println!("{} No IDE detected (TERM_PROGRAM={})", "ยท".dimmed(), std::env::var("TERM_PROGRAM").unwrap_or_default());
154 None
155 }
156 };
157
158 ChatSession::load_api_key_to_env(session.provider);
160
161 if !ChatSession::has_api_key(session.provider) {
163 ChatSession::prompt_api_key(session.provider)?;
164 }
165
166 session.print_banner();
167
168 loop {
169 if !conversation_history.is_empty() {
171 println!("{}", format!(" ๐ฌ Context: {}", conversation_history.status()).dimmed());
172 }
173
174 let input = match session.read_input() {
176 Ok(input) => input,
177 Err(_) => break,
178 };
179
180 if input.is_empty() {
181 continue;
182 }
183
184 if ChatSession::is_command(&input) {
186 if input.trim().to_lowercase() == "/clear" || input.trim().to_lowercase() == "/c" {
188 conversation_history.clear();
189 }
190 match session.process_command(&input) {
191 Ok(true) => continue,
192 Ok(false) => break, Err(e) => {
194 eprintln!("{}", format!("Error: {}", e).red());
195 continue;
196 }
197 }
198 }
199
200 if !ChatSession::has_api_key(session.provider) {
202 eprintln!("{}", "No API key configured. Use /provider to set one.".yellow());
203 continue;
204 }
205
206 if conversation_history.needs_compaction() {
208 println!("{}", " ๐ฆ Compacting conversation history...".dimmed());
209 if let Some(summary) = conversation_history.compact() {
210 println!("{}", format!(" โ Compressed {} turns", summary.matches("Turn").count()).dimmed());
211 }
212 }
213
214 const MAX_RETRIES: u32 = 3;
220 const MAX_CONTINUATIONS: u32 = 10;
221 const TOOL_CALL_CHECKPOINT: usize = 50;
222 const MAX_TOOL_CALLS: usize = 300;
223 let mut retry_attempt = 0;
224 let mut continuation_count = 0;
225 let mut total_tool_calls: usize = 0;
226 let mut auto_continue_tools = false; let mut current_input = input.clone();
228 let mut succeeded = false;
229
230 while retry_attempt < MAX_RETRIES && continuation_count < MAX_CONTINUATIONS && !succeeded {
231
232 if continuation_count > 0 {
234 eprintln!("{}", format!(" ๐ก Sending continuation request...").dimmed());
235 }
236
237 let hook = ToolDisplayHook::new();
239
240 let project_path_buf = session.project_path.clone();
241 let preamble = get_system_prompt(&session.project_path, Some(¤t_input));
243 let is_generation = prompts::is_generation_query(¤t_input);
244
245 let mut chat_history = conversation_history.to_messages();
247
248 let response = match session.provider {
249 ProviderType::OpenAI => {
250 let client = openai::Client::from_env();
251 let reasoning_params = if session.model.starts_with("gpt-5") || session.model.starts_with("o1") {
254 Some(serde_json::json!({
255 "reasoning": {
256 "effort": "medium",
257 "summary": "detailed"
258 }
259 }))
260 } else {
261 None
262 };
263
264 let mut builder = client
265 .agent(&session.model)
266 .preamble(&preamble)
267 .max_tokens(4096)
268 .tool(AnalyzeTool::new(project_path_buf.clone()))
269 .tool(SecurityScanTool::new(project_path_buf.clone()))
270 .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
271 .tool(HadolintTool::new(project_path_buf.clone()))
272 .tool(ReadFileTool::new(project_path_buf.clone()))
273 .tool(ListDirectoryTool::new(project_path_buf.clone()));
274
275 if is_generation {
277 let (write_file_tool, write_files_tool) = if let Some(ref client) = ide_client {
279 (
280 WriteFileTool::new(project_path_buf.clone())
281 .with_ide_client(client.clone()),
282 WriteFilesTool::new(project_path_buf.clone())
283 .with_ide_client(client.clone()),
284 )
285 } else {
286 (
287 WriteFileTool::new(project_path_buf.clone()),
288 WriteFilesTool::new(project_path_buf.clone()),
289 )
290 };
291 builder = builder
292 .tool(write_file_tool)
293 .tool(write_files_tool)
294 .tool(ShellTool::new(project_path_buf.clone()));
295 }
296
297 if let Some(params) = reasoning_params {
298 builder = builder.additional_params(params);
299 }
300
301 let agent = builder.build();
302 agent.prompt(¤t_input)
306 .with_history(&mut chat_history)
307 .with_hook(hook.clone())
308 .multi_turn(50)
309 .await
310 }
311 ProviderType::Anthropic => {
312 let client = anthropic::Client::from_env();
313 let mut builder = client
314 .agent(&session.model)
315 .preamble(&preamble)
316 .max_tokens(4096)
317 .tool(AnalyzeTool::new(project_path_buf.clone()))
318 .tool(SecurityScanTool::new(project_path_buf.clone()))
319 .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
320 .tool(HadolintTool::new(project_path_buf.clone()))
321 .tool(ReadFileTool::new(project_path_buf.clone()))
322 .tool(ListDirectoryTool::new(project_path_buf.clone()));
323
324 if is_generation {
326 let (write_file_tool, write_files_tool) = if let Some(ref client) = ide_client {
328 (
329 WriteFileTool::new(project_path_buf.clone())
330 .with_ide_client(client.clone()),
331 WriteFilesTool::new(project_path_buf.clone())
332 .with_ide_client(client.clone()),
333 )
334 } else {
335 (
336 WriteFileTool::new(project_path_buf.clone()),
337 WriteFilesTool::new(project_path_buf.clone()),
338 )
339 };
340 builder = builder
341 .tool(write_file_tool)
342 .tool(write_files_tool)
343 .tool(ShellTool::new(project_path_buf.clone()));
344 }
345
346 let agent = builder.build();
347
348 agent.prompt(¤t_input)
352 .with_history(&mut chat_history)
353 .with_hook(hook.clone())
354 .multi_turn(50)
355 .await
356 }
357 };
358
359 match response {
360 Ok(text) => {
361 println!();
363 ResponseFormatter::print_response(&text);
364
365 let prompt_tokens = TokenUsage::estimate_tokens(&input);
367 let completion_tokens = TokenUsage::estimate_tokens(&text);
368 session.token_usage.add_request(prompt_tokens, completion_tokens);
369
370 let tool_calls = extract_tool_calls_from_hook(&hook).await;
372 let batch_tool_count = tool_calls.len();
373 total_tool_calls += batch_tool_count;
374
375 if batch_tool_count > 10 {
377 println!("{}", format!(" โ Completed with {} tool calls ({} total this session)", batch_tool_count, total_tool_calls).dimmed());
378 }
379
380 conversation_history.add_turn(input.clone(), text.clone(), tool_calls);
382
383 if conversation_history.needs_compaction() {
386 println!("{}", " ๐ฆ Compacting conversation history...".dimmed());
387 if let Some(summary) = conversation_history.compact() {
388 println!("{}", format!(" โ Compressed {} turns", summary.matches("Turn").count()).dimmed());
389 }
390 }
391
392 session.history.push(("user".to_string(), input.clone()));
394 session.history.push(("assistant".to_string(), text));
395 succeeded = true;
396 }
397 Err(e) => {
398 let err_str = e.to_string();
399
400 println!();
401
402 if err_str.contains("MaxDepth") || err_str.contains("max_depth") || err_str.contains("reached limit") {
404 let completed_tools = extract_tool_calls_from_hook(&hook).await;
406 let agent_thinking = extract_agent_messages_from_hook(&hook).await;
407 let batch_tool_count = completed_tools.len();
408 total_tool_calls += batch_tool_count;
409
410 eprintln!("{}", format!(
411 "โ Reached {} tool calls this batch ({} total). Maximum allowed: {}",
412 batch_tool_count, total_tool_calls, MAX_TOOL_CALLS
413 ).yellow());
414
415 if total_tool_calls >= MAX_TOOL_CALLS {
417 eprintln!("{}", format!("Maximum tool call limit ({}) reached.", MAX_TOOL_CALLS).red());
418 eprintln!("{}", "The task is too complex. Try breaking it into smaller parts.".dimmed());
419 break;
420 }
421
422 let should_continue = if auto_continue_tools {
424 eprintln!("{}", " Auto-continuing (you selected 'always')...".dimmed());
425 true
426 } else {
427 eprintln!("{}", "Excessive tool calls used. Want to continue?".yellow());
428 eprintln!("{}", " [y] Yes, continue [n] No, stop [a] Always continue".dimmed());
429 print!(" > ");
430 let _ = std::io::Write::flush(&mut std::io::stdout());
431
432 let mut response = String::new();
434 match std::io::stdin().read_line(&mut response) {
435 Ok(_) => {
436 let resp = response.trim().to_lowercase();
437 if resp == "a" || resp == "always" {
438 auto_continue_tools = true;
439 true
440 } else {
441 resp == "y" || resp == "yes" || resp.is_empty()
442 }
443 }
444 Err(_) => false,
445 }
446 };
447
448 if !should_continue {
449 eprintln!("{}", "Stopped by user. Type 'continue' to resume later.".dimmed());
450 if !completed_tools.is_empty() {
452 conversation_history.add_turn(
453 current_input.clone(),
454 format!("[Stopped at checkpoint - {} tools completed]", batch_tool_count),
455 vec![]
456 );
457 }
458 break;
459 }
460
461 eprintln!("{}", format!(
463 " โ Continuing... {} remaining tool calls available",
464 MAX_TOOL_CALLS - total_tool_calls
465 ).dimmed());
466
467 conversation_history.add_turn(
469 current_input.clone(),
470 format!("[Checkpoint - {} tools completed, continuing...]", batch_tool_count),
471 vec![]
472 );
473
474 current_input = build_continuation_prompt(&input, &completed_tools, &agent_thinking);
476
477 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
479 continue; } else if err_str.contains("rate") || err_str.contains("Rate") || err_str.contains("429") {
481 eprintln!("{}", "โ Rate limited by API provider.".yellow());
482 retry_attempt += 1;
484 eprintln!("{}", format!(" Waiting 5 seconds before retry ({}/{})...", retry_attempt, MAX_RETRIES).dimmed());
485 tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
486 } else if is_truncation_error(&err_str) {
487 let completed_tools = extract_tool_calls_from_hook(&hook).await;
489 let agent_thinking = extract_agent_messages_from_hook(&hook).await;
490
491 let completed_count = completed_tools.iter()
493 .filter(|t| !t.result_summary.contains("IN PROGRESS"))
494 .count();
495 let in_progress_count = completed_tools.len() - completed_count;
496
497 if !completed_tools.is_empty() && continuation_count < MAX_CONTINUATIONS {
498 continuation_count += 1;
500 let status_msg = if in_progress_count > 0 {
501 format!(
502 "โ Response truncated. {} completed, {} in-progress. Auto-continuing ({}/{})...",
503 completed_count, in_progress_count, continuation_count, MAX_CONTINUATIONS
504 )
505 } else {
506 format!(
507 "โ Response truncated. {} tool calls completed. Auto-continuing ({}/{})...",
508 completed_count, continuation_count, MAX_CONTINUATIONS
509 )
510 };
511 eprintln!("{}", status_msg.yellow());
512
513 conversation_history.add_turn(
518 current_input.clone(),
519 format!("[Partial response - {} tools completed, {} in-progress before truncation. See continuation prompt for details.]",
520 completed_count, in_progress_count),
521 vec![] );
523
524 if conversation_history.needs_compaction() {
527 eprintln!("{}", " ๐ฆ Compacting history before continuation...".dimmed());
528 if let Some(summary) = conversation_history.compact() {
529 eprintln!("{}", format!(" โ Compressed {} turns", summary.matches("Turn").count()).dimmed());
530 }
531 }
532
533 current_input = build_continuation_prompt(&input, &completed_tools, &agent_thinking);
535
536 eprintln!("{}", format!(
538 " โ Continuing with {} files read, {} written, {} other actions tracked",
539 completed_tools.iter().filter(|t| t.tool_name == "read_file").count(),
540 completed_tools.iter().filter(|t| t.tool_name == "write_file" || t.tool_name == "write_files").count(),
541 completed_tools.iter().filter(|t| t.tool_name != "read_file" && t.tool_name != "write_file" && t.tool_name != "write_files" && t.tool_name != "list_directory").count()
542 ).dimmed());
543
544 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
546 } else if retry_attempt < MAX_RETRIES {
548 retry_attempt += 1;
550 eprintln!("{}", format!("โ Response error (attempt {}/{}). Retrying...", retry_attempt, MAX_RETRIES).yellow());
551 tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
552 } else {
553 eprintln!("{}", format!("Error: {}", e).red());
555 if continuation_count >= MAX_CONTINUATIONS {
556 eprintln!("{}", format!("Max continuations ({}) reached. The task is too complex for one request.", MAX_CONTINUATIONS).dimmed());
557 } else {
558 eprintln!("{}", "Max retries reached. The response may be too complex.".dimmed());
559 }
560 eprintln!("{}", "Try breaking your request into smaller parts.".dimmed());
561 break;
562 }
563 } else if err_str.contains("timeout") || err_str.contains("Timeout") {
564 retry_attempt += 1;
566 if retry_attempt < MAX_RETRIES {
567 eprintln!("{}", format!("โ Request timed out (attempt {}/{}). Retrying...", retry_attempt, MAX_RETRIES).yellow());
568 tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
569 } else {
570 eprintln!("{}", "Request timed out. Please try again.".red());
571 break;
572 }
573 } else {
574 eprintln!("{}", format!("Error: {}", e).red());
576 if continuation_count > 0 {
577 eprintln!("{}", format!(" (occurred during continuation attempt {})", continuation_count).dimmed());
578 }
579 eprintln!("{}", "Error details for debugging:".dimmed());
580 eprintln!("{}", format!(" - retry_attempt: {}/{}", retry_attempt, MAX_RETRIES).dimmed());
581 eprintln!("{}", format!(" - continuation_count: {}/{}", continuation_count, MAX_CONTINUATIONS).dimmed());
582 break;
583 }
584 }
585 }
586 }
587 println!();
588 }
589
590 Ok(())
591}
592
593async fn extract_tool_calls_from_hook(hook: &ToolDisplayHook) -> Vec<ToolCallRecord> {
595 let state = hook.state();
596 let guard = state.lock().await;
597
598 guard.tool_calls.iter().enumerate().map(|(i, tc)| {
599 let result = if tc.is_running {
600 "[IN PROGRESS - may need to be re-run]".to_string()
602 } else if let Some(output) = &tc.output {
603 truncate_string(output, 200)
604 } else {
605 "completed".to_string()
606 };
607
608 ToolCallRecord {
609 tool_name: tc.name.clone(),
610 args_summary: truncate_string(&tc.args, 100),
611 result_summary: result,
612 tool_id: Some(format!("tool_{}_{}", tc.name, i)),
614 }
615 }).collect()
616}
617
618async fn extract_agent_messages_from_hook(hook: &ToolDisplayHook) -> Vec<String> {
620 let state = hook.state();
621 let guard = state.lock().await;
622 guard.agent_messages.clone()
623}
624
/// Truncates `s` to at most `max_len` bytes, appending "..." when cut.
///
/// Fix: the original sliced at `max_len - 3` unconditionally, which panics
/// when that byte offset falls inside a multi-byte UTF-8 character (the
/// strings truncated here include model output and file contents, which are
/// not guaranteed ASCII). The cut point now backs off to the nearest char
/// boundary; offset 0 is always a boundary, so the loop terminates.
fn truncate_string(s: &str, max_len: usize) -> String {
    if s.len() <= max_len {
        return s.to_string();
    }
    let mut cut = max_len.saturating_sub(3);
    while !s.is_char_boundary(cut) {
        cut -= 1;
    }
    format!("{}...", &s[..cut])
}
633
/// Heuristically detects provider errors caused by a truncated or malformed
/// (typically JSON) response body, based on known error-message markers.
fn is_truncation_error(err_str: &str) -> bool {
    const MARKERS: [&str; 4] = [
        "JsonError",
        "EOF while parsing",
        "JSON",
        "unexpected end",
    ];
    MARKERS.iter().any(|marker| err_str.contains(marker))
}
641
642fn build_continuation_prompt(
645 original_task: &str,
646 completed_tools: &[ToolCallRecord],
647 agent_thinking: &[String],
648) -> String {
649 use std::collections::HashSet;
650
651 let mut files_read: HashSet<String> = HashSet::new();
653 let mut files_written: HashSet<String> = HashSet::new();
654 let mut dirs_listed: HashSet<String> = HashSet::new();
655 let mut other_tools: Vec<String> = Vec::new();
656 let mut in_progress: Vec<String> = Vec::new();
657
658 for tool in completed_tools {
659 let is_in_progress = tool.result_summary.contains("IN PROGRESS");
660
661 if is_in_progress {
662 in_progress.push(format!("{}({})", tool.tool_name, tool.args_summary));
663 continue;
664 }
665
666 match tool.tool_name.as_str() {
667 "read_file" => {
668 files_read.insert(tool.args_summary.clone());
670 }
671 "write_file" | "write_files" => {
672 files_written.insert(tool.args_summary.clone());
673 }
674 "list_directory" => {
675 dirs_listed.insert(tool.args_summary.clone());
676 }
677 _ => {
678 other_tools.push(format!("{}({})", tool.tool_name, truncate_string(&tool.args_summary, 40)));
679 }
680 }
681 }
682
683 let mut prompt = format!(
684 "[CONTINUE] Your previous response was interrupted. DO NOT repeat completed work.\n\n\
685 Original task: {}\n",
686 truncate_string(original_task, 500)
687 );
688
689 if !files_read.is_empty() {
691 prompt.push_str("\n== FILES ALREADY READ (do NOT read again) ==\n");
692 for file in &files_read {
693 prompt.push_str(&format!(" - {}\n", file));
694 }
695 }
696
697 if !dirs_listed.is_empty() {
698 prompt.push_str("\n== DIRECTORIES ALREADY LISTED ==\n");
699 for dir in &dirs_listed {
700 prompt.push_str(&format!(" - {}\n", dir));
701 }
702 }
703
704 if !files_written.is_empty() {
705 prompt.push_str("\n== FILES ALREADY WRITTEN ==\n");
706 for file in &files_written {
707 prompt.push_str(&format!(" - {}\n", file));
708 }
709 }
710
711 if !other_tools.is_empty() {
712 prompt.push_str("\n== OTHER COMPLETED ACTIONS ==\n");
713 for tool in other_tools.iter().take(20) {
714 prompt.push_str(&format!(" - {}\n", tool));
715 }
716 if other_tools.len() > 20 {
717 prompt.push_str(&format!(" ... and {} more\n", other_tools.len() - 20));
718 }
719 }
720
721 if !in_progress.is_empty() {
722 prompt.push_str("\n== INTERRUPTED (may need re-run) ==\n");
723 for tool in &in_progress {
724 prompt.push_str(&format!(" โ {}\n", tool));
725 }
726 }
727
728 if !agent_thinking.is_empty() {
730 if let Some(last_thought) = agent_thinking.last() {
731 prompt.push_str(&format!(
732 "\n== YOUR LAST THOUGHTS ==\n\"{}\"\n",
733 truncate_string(last_thought, 300)
734 ));
735 }
736 }
737
738 prompt.push_str("\n== INSTRUCTIONS ==\n");
739 prompt.push_str("IMPORTANT: Your previous response was too long and got cut off.\n");
740 prompt.push_str("1. Do NOT re-read files listed above - they are already in context.\n");
741 prompt.push_str("2. If writing a document, write it in SECTIONS - complete one section now, then continue.\n");
742 prompt.push_str("3. Keep your response SHORT and focused. Better to complete small chunks than fail on large ones.\n");
743 prompt.push_str("4. If the task involves writing a file, START WRITING NOW - don't explain what you'll do.\n");
744
745 prompt
746}
747
/// Runs a single non-interactive query against the selected provider and
/// returns the model's final text response.
///
/// Tool wiring mirrors `run_interactive`: read-only analysis tools are always
/// attached; write/shell tools are attached only for generation-style queries.
/// Unlike the interactive path, there is no IDE routing, retry, or
/// continuation handling here.
///
/// # Errors
/// Returns [`AgentError::ProviderError`] when the provider call fails.
pub async fn run_query(
    project_path: &Path,
    query: &str,
    provider: ProviderType,
    model: Option<String>,
) -> AgentResult<String> {
    use tools::*;

    let project_path_buf = project_path.to_path_buf();
    // Prompt is specialized by detected query intent (code dev / generation / analysis).
    let preamble = get_system_prompt(project_path, Some(query));
    let is_generation = prompts::is_generation_query(query);

    match provider {
        ProviderType::OpenAI => {
            let client = openai::Client::from_env();
            let model_name = model.as_deref().unwrap_or("gpt-5.2");

            // Reasoning-capable models accept extra reasoning parameters.
            let reasoning_params = if model_name.starts_with("gpt-5") || model_name.starts_with("o1") {
                Some(serde_json::json!({
                    "reasoning": {
                        "effort": "medium",
                        "summary": "detailed"
                    }
                }))
            } else {
                None
            };

            let mut builder = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(HadolintTool::new(project_path_buf.clone()))
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()));

            // Write/shell tools only when the query looks like generation.
            if is_generation {
                builder = builder
                    .tool(WriteFileTool::new(project_path_buf.clone()))
                    .tool(WriteFilesTool::new(project_path_buf.clone()))
                    .tool(ShellTool::new(project_path_buf.clone()));
            }

            if let Some(params) = reasoning_params {
                builder = builder.additional_params(params);
            }

            let agent = builder.build();

            agent
                .prompt(query)
                .multi_turn(50)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
        ProviderType::Anthropic => {
            let client = anthropic::Client::from_env();
            let model_name = model.as_deref().unwrap_or("claude-sonnet-4-20250514");

            let mut builder = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(HadolintTool::new(project_path_buf.clone()))
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()));

            if is_generation {
                builder = builder
                    .tool(WriteFileTool::new(project_path_buf.clone()))
                    .tool(WriteFilesTool::new(project_path_buf.clone()))
                    .tool(ShellTool::new(project_path_buf.clone()));
            }

            let agent = builder.build();

            agent
                .prompt(query)
                .multi_turn(50)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
    }
}