pub mod commands;
pub mod compact;
pub mod history;
pub mod ide;
pub mod prompts;
pub mod session;
pub mod tools;
pub mod ui;

use colored::Colorize;
use history::{ConversationHistory, ToolCallRecord};
use ide::IdeClient;
use rig::{
    client::{CompletionClient, ProviderClient},
    completion::Prompt,
    providers::{anthropic, openai},
};
use session::ChatSession;
use commands::TokenUsage;
use std::path::Path;
use std::sync::Arc;
use tokio::sync::Mutex as TokioMutex;
use ui::{ResponseFormatter, ToolDisplayHook};

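/// LLM provider backends the agent can talk to.
///
/// Parsing is case-insensitive; see the `FromStr` impl below. For example,
/// `"anthropic".parse::<ProviderType>()` yields `Ok(ProviderType::Anthropic)`,
/// and `"aws"` / `"aws-bedrock"` are accepted aliases for `Bedrock`.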
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum ProviderType {
    #[default]
    OpenAI,
    Anthropic,
    Bedrock,
}

impl std::fmt::Display for ProviderType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ProviderType::OpenAI => write!(f, "openai"),
            ProviderType::Anthropic => write!(f, "anthropic"),
            ProviderType::Bedrock => write!(f, "bedrock"),
        }
    }
}

impl std::str::FromStr for ProviderType {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "openai" => Ok(ProviderType::OpenAI),
            "anthropic" => Ok(ProviderType::Anthropic),
            "bedrock" | "aws" | "aws-bedrock" => Ok(ProviderType::Bedrock),
            _ => Err(format!("Unknown provider: {}. Use: openai, anthropic, or bedrock", s)),
        }
    }
}

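/// Errors surfaced by the agent entry points (`run_interactive` / `run_query`).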
#[derive(Debug, thiserror::Error)]
pub enum AgentError {
    #[error("Missing API key. Set {0} environment variable.")]
    MissingApiKey(String),

    #[error("Provider error: {0}")]
    ProviderError(String),

    #[error("Tool error: {0}")]
    ToolError(String),
}

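/// Convenience result alias used throughout the agent module.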
pub type AgentResult<T> = Result<T, AgentError>;

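/// Picks a system prompt based on the user's query: code-development and
/// generation queries get specialized prompts; everything else falls back to
/// the analysis prompt.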
fn get_system_prompt(project_path: &Path, query: Option<&str>) -> String {
    if let Some(q) = query {
        if prompts::is_code_development_query(q) {
            return prompts::get_code_development_prompt(project_path);
        }
        if prompts::is_generation_query(q) {
            return prompts::get_devops_prompt(project_path);
        }
    }
    prompts::get_analysis_prompt(project_path)
}

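/// Runs the interactive chat loop: reads user input, dispatches slash
/// commands, builds a provider-specific agent with the project tools, and
/// handles retries, rate limits, truncation recovery, and tool-call limits.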
pub async fn run_interactive(
    project_path: &Path,
    provider: ProviderType,
    model: Option<String>,
) -> AgentResult<()> {
    use tools::*;

    let mut session = ChatSession::new(project_path, provider, model);

    let mut conversation_history = ConversationHistory::new();

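    // Try to connect to an IDE companion (e.g. VS Code) if one is detected;
    // the connection is optional and a failure only downgrades the experience.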
    let ide_client: Option<Arc<TokioMutex<IdeClient>>> = {
        let mut client = IdeClient::new().await;
        if client.is_ide_available() {
            match client.connect().await {
                Ok(()) => {
                    println!(
                        "{} Connected to {} IDE companion",
                        "✓".green(),
                        client.ide_name().unwrap_or("VS Code")
                    );
                    Some(Arc::new(TokioMutex::new(client)))
                }
                Err(e) => {
                    println!(
                        "{} IDE companion not connected: {}",
                        "!".yellow(),
                        e
                    );
                    None
                }
            }
        } else {
            println!("{} No IDE detected (TERM_PROGRAM={})", "·".dimmed(), std::env::var("TERM_PROGRAM").unwrap_or_default());
            None
        }
    };

    ChatSession::load_api_key_to_env(session.provider);

    if !ChatSession::has_api_key(session.provider) {
        ChatSession::prompt_api_key(session.provider)?;
    }

    session.print_banner();

    loop {
        if !conversation_history.is_empty() {
            println!("{}", format!(" 💬 Context: {}", conversation_history.status()).dimmed());
        }

        let input = match session.read_input() {
            Ok(input) => input,
            Err(_) => break,
        };

        if input.is_empty() {
            continue;
        }

        if ChatSession::is_command(&input) {
            if input.trim().to_lowercase() == "/clear" || input.trim().to_lowercase() == "/c" {
                conversation_history.clear();
            }
            match session.process_command(&input) {
                Ok(true) => continue,
                Ok(false) => break,
                Err(e) => {
                    eprintln!("{}", format!("Error: {}", e).red());
                    continue;
                }
            }
        }

        if !ChatSession::has_api_key(session.provider) {
            eprintln!("{}", "No API key configured. Use /provider to set one.".yellow());
            continue;
        }

        if conversation_history.needs_compaction() {
            println!("{}", " 📦 Compacting conversation history...".dimmed());
            if let Some(summary) = conversation_history.compact() {
                println!("{}", format!(" ✓ Compressed {} turns", summary.matches("Turn").count()).dimmed());
            }
        }

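        // Per-request limits: bounded retries for transient failures, bounded
        // auto-continuations for truncated responses, and a hard cap on tool
        // calls so a runaway agent cannot loop indefinitely.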
        const MAX_RETRIES: u32 = 3;
        const MAX_CONTINUATIONS: u32 = 10;
        const TOOL_CALL_CHECKPOINT: usize = 50;
        const MAX_TOOL_CALLS: usize = 300;
        let mut retry_attempt = 0;
        let mut continuation_count = 0;
        let mut total_tool_calls: usize = 0;
        let mut auto_continue_tools = false;
        let mut current_input = input.clone();
        let mut succeeded = false;

        while retry_attempt < MAX_RETRIES && continuation_count < MAX_CONTINUATIONS && !succeeded {

            if continuation_count > 0 {
                eprintln!("{}", " 📡 Sending continuation request...".dimmed());
            }

            let hook = ToolDisplayHook::new();

            let project_path_buf = session.project_path.clone();
            let preamble = get_system_prompt(&session.project_path, Some(&current_input));
            let is_generation = prompts::is_generation_query(&current_input);

            let mut chat_history = conversation_history.to_messages();

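            // Build a fresh provider-specific agent for this attempt. Each
            // branch wires up the same analysis tools; write and shell tools
            // are only added for generation-style queries.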
            let response = match session.provider {
                ProviderType::OpenAI => {
                    let client = openai::Client::from_env();
                    let reasoning_params = if session.model.starts_with("gpt-5") || session.model.starts_with("o1") {
                        Some(serde_json::json!({
                            "reasoning": {
                                "effort": "medium",
                                "summary": "detailed"
                            }
                        }))
                    } else {
                        None
                    };

                    let mut builder = client
                        .agent(&session.model)
                        .preamble(&preamble)
                        .max_tokens(4096)
                        .tool(AnalyzeTool::new(project_path_buf.clone()))
                        .tool(SecurityScanTool::new(project_path_buf.clone()))
                        .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                        .tool(HadolintTool::new(project_path_buf.clone()))
                        .tool(TerraformFmtTool::new(project_path_buf.clone()))
                        .tool(TerraformValidateTool::new(project_path_buf.clone()))
                        .tool(TerraformInstallTool::new())
                        .tool(ReadFileTool::new(project_path_buf.clone()))
                        .tool(ListDirectoryTool::new(project_path_buf.clone()));

                    if is_generation {
                        let (write_file_tool, write_files_tool) = if let Some(ref client) = ide_client {
                            (
                                WriteFileTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                                WriteFilesTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                            )
                        } else {
                            (
                                WriteFileTool::new(project_path_buf.clone()),
                                WriteFilesTool::new(project_path_buf.clone()),
                            )
                        };
                        builder = builder
                            .tool(write_file_tool)
                            .tool(write_files_tool)
                            .tool(ShellTool::new(project_path_buf.clone()));
                    }

                    if let Some(params) = reasoning_params {
                        builder = builder.additional_params(params);
                    }

                    let agent = builder.build();
                    agent.prompt(&current_input)
                        .with_history(&mut chat_history)
                        .with_hook(hook.clone())
                        .multi_turn(50)
                        .await
                }
                ProviderType::Anthropic => {
                    let client = anthropic::Client::from_env();

                    let mut builder = client
                        .agent(&session.model)
                        .preamble(&preamble)
                        .max_tokens(4096)
                        .tool(AnalyzeTool::new(project_path_buf.clone()))
                        .tool(SecurityScanTool::new(project_path_buf.clone()))
                        .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                        .tool(HadolintTool::new(project_path_buf.clone()))
                        .tool(TerraformFmtTool::new(project_path_buf.clone()))
                        .tool(TerraformValidateTool::new(project_path_buf.clone()))
                        .tool(TerraformInstallTool::new())
                        .tool(ReadFileTool::new(project_path_buf.clone()))
                        .tool(ListDirectoryTool::new(project_path_buf.clone()));

                    if is_generation {
                        let (write_file_tool, write_files_tool) = if let Some(ref client) = ide_client {
                            (
                                WriteFileTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                                WriteFilesTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                            )
                        } else {
                            (
                                WriteFileTool::new(project_path_buf.clone()),
                                WriteFilesTool::new(project_path_buf.clone()),
                            )
                        };
                        builder = builder
                            .tool(write_file_tool)
                            .tool(write_files_tool)
                            .tool(ShellTool::new(project_path_buf.clone()));
                    }

                    let agent = builder.build();

                    agent.prompt(&current_input)
                        .with_history(&mut chat_history)
                        .with_hook(hook.clone())
                        .multi_turn(50)
                        .await
                }
                ProviderType::Bedrock => {
                    let client = rig_bedrock::client::Client::from_env();

                    let thinking_params = serde_json::json!({
                        "thinking": {
                            "type": "enabled",
                            "budget_tokens": 8000
                        }
                    });

                    let mut builder = client
                        .agent(&session.model)
                        .preamble(&preamble)
                        .max_tokens(16000)
                        .tool(AnalyzeTool::new(project_path_buf.clone()))
                        .tool(SecurityScanTool::new(project_path_buf.clone()))
                        .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                        .tool(HadolintTool::new(project_path_buf.clone()))
                        .tool(TerraformFmtTool::new(project_path_buf.clone()))
                        .tool(TerraformValidateTool::new(project_path_buf.clone()))
                        .tool(TerraformInstallTool::new())
                        .tool(ReadFileTool::new(project_path_buf.clone()))
                        .tool(ListDirectoryTool::new(project_path_buf.clone()));

                    if is_generation {
                        let (write_file_tool, write_files_tool) = if let Some(ref client) = ide_client {
                            (
                                WriteFileTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                                WriteFilesTool::new(project_path_buf.clone())
                                    .with_ide_client(client.clone()),
                            )
                        } else {
                            (
                                WriteFileTool::new(project_path_buf.clone()),
                                WriteFilesTool::new(project_path_buf.clone()),
                            )
                        };
                        builder = builder
                            .tool(write_file_tool)
                            .tool(write_files_tool)
                            .tool(ShellTool::new(project_path_buf.clone()));
                    }

                    builder = builder.additional_params(thinking_params);

                    let agent = builder.build();

                    agent.prompt(&current_input)
                        .with_history(&mut chat_history)
                        .with_hook(hook.clone())
                        .multi_turn(50)
                        .await
                }
            };

            match response {
                Ok(text) => {
                    println!();
                    ResponseFormatter::print_response(&text);

                    let prompt_tokens = TokenUsage::estimate_tokens(&input);
                    let completion_tokens = TokenUsage::estimate_tokens(&text);
                    session.token_usage.add_request(prompt_tokens, completion_tokens);

                    let tool_calls = extract_tool_calls_from_hook(&hook).await;
                    let batch_tool_count = tool_calls.len();
                    total_tool_calls += batch_tool_count;

                    if batch_tool_count > 10 {
                        println!("{}", format!(" ✓ Completed with {} tool calls ({} total this session)", batch_tool_count, total_tool_calls).dimmed());
                    }

                    conversation_history.add_turn(input.clone(), text.clone(), tool_calls);

                    if conversation_history.needs_compaction() {
                        println!("{}", " 📦 Compacting conversation history...".dimmed());
                        if let Some(summary) = conversation_history.compact() {
                            println!("{}", format!(" ✓ Compressed {} turns", summary.matches("Turn").count()).dimmed());
                        }
                    }

                    session.history.push(("user".to_string(), input.clone()));
                    session.history.push(("assistant".to_string(), text));
                    succeeded = true;
                }
                Err(e) => {
                    let err_str = e.to_string();

                    println!();

                    if err_str.contains("MaxDepth") || err_str.contains("max_depth") || err_str.contains("reached limit") {
                        let completed_tools = extract_tool_calls_from_hook(&hook).await;
                        let agent_thinking = extract_agent_messages_from_hook(&hook).await;
                        let batch_tool_count = completed_tools.len();
                        total_tool_calls += batch_tool_count;

                        eprintln!("{}", format!(
                            "⚠ Reached {} tool calls this batch ({} total). Maximum allowed: {}",
                            batch_tool_count, total_tool_calls, MAX_TOOL_CALLS
                        ).yellow());

                        if total_tool_calls >= MAX_TOOL_CALLS {
                            eprintln!("{}", format!("Maximum tool call limit ({}) reached.", MAX_TOOL_CALLS).red());
                            eprintln!("{}", "The task is too complex. Try breaking it into smaller parts.".dimmed());
                            break;
                        }

                        let should_continue = if auto_continue_tools {
                            eprintln!("{}", " Auto-continuing (you selected 'always')...".dimmed());
                            true
                        } else {
                            eprintln!("{}", "Excessive tool calls used. Want to continue?".yellow());
                            eprintln!("{}", " [y] Yes, continue [n] No, stop [a] Always continue".dimmed());
                            print!(" > ");
                            let _ = std::io::Write::flush(&mut std::io::stdout());

                            let mut response = String::new();
                            match std::io::stdin().read_line(&mut response) {
                                Ok(_) => {
                                    let resp = response.trim().to_lowercase();
                                    if resp == "a" || resp == "always" {
                                        auto_continue_tools = true;
                                        true
                                    } else {
                                        resp == "y" || resp == "yes" || resp.is_empty()
                                    }
                                }
                                Err(_) => false,
                            }
                        };

                        if !should_continue {
                            eprintln!("{}", "Stopped by user. Type 'continue' to resume later.".dimmed());
                            if !completed_tools.is_empty() {
                                conversation_history.add_turn(
                                    current_input.clone(),
                                    format!("[Stopped at checkpoint - {} tools completed]", batch_tool_count),
                                    vec![]
                                );
                            }
                            break;
                        }

                        eprintln!("{}", format!(
                            " → Continuing... {} remaining tool calls available",
                            MAX_TOOL_CALLS - total_tool_calls
                        ).dimmed());

                        conversation_history.add_turn(
                            current_input.clone(),
                            format!("[Checkpoint - {} tools completed, continuing...]", batch_tool_count),
                            vec![]
                        );

                        current_input = build_continuation_prompt(&input, &completed_tools, &agent_thinking);

                        tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
                        continue;
                    } else if err_str.contains("rate") || err_str.contains("Rate") || err_str.contains("429") {
                        eprintln!("{}", "⚠ Rate limited by API provider.".yellow());
                        retry_attempt += 1;
                        eprintln!("{}", format!(" Waiting 5 seconds before retry ({}/{})...", retry_attempt, MAX_RETRIES).dimmed());
                        tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
                    } else if is_truncation_error(&err_str) {
                        let completed_tools = extract_tool_calls_from_hook(&hook).await;
                        let agent_thinking = extract_agent_messages_from_hook(&hook).await;

                        let completed_count = completed_tools.iter()
                            .filter(|t| !t.result_summary.contains("IN PROGRESS"))
                            .count();
                        let in_progress_count = completed_tools.len() - completed_count;

                        if !completed_tools.is_empty() && continuation_count < MAX_CONTINUATIONS {
                            continuation_count += 1;
                            let status_msg = if in_progress_count > 0 {
                                format!(
                                    "⚠ Response truncated. {} completed, {} in-progress. Auto-continuing ({}/{})...",
                                    completed_count, in_progress_count, continuation_count, MAX_CONTINUATIONS
                                )
                            } else {
                                format!(
                                    "⚠ Response truncated. {} tool calls completed. Auto-continuing ({}/{})...",
                                    completed_count, continuation_count, MAX_CONTINUATIONS
                                )
                            };
                            eprintln!("{}", status_msg.yellow());

                            conversation_history.add_turn(
                                current_input.clone(),
                                format!("[Partial response - {} tools completed, {} in-progress before truncation. See continuation prompt for details.]",
                                    completed_count, in_progress_count),
                                vec![]
                            );

                            if conversation_history.needs_compaction() {
                                eprintln!("{}", " 📦 Compacting history before continuation...".dimmed());
                                if let Some(summary) = conversation_history.compact() {
                                    eprintln!("{}", format!(" ✓ Compressed {} turns", summary.matches("Turn").count()).dimmed());
                                }
                            }

                            current_input = build_continuation_prompt(&input, &completed_tools, &agent_thinking);

                            eprintln!("{}", format!(
                                " → Continuing with {} files read, {} written, {} other actions tracked",
                                completed_tools.iter().filter(|t| t.tool_name == "read_file").count(),
                                completed_tools.iter().filter(|t| t.tool_name == "write_file" || t.tool_name == "write_files").count(),
                                completed_tools.iter().filter(|t| t.tool_name != "read_file" && t.tool_name != "write_file" && t.tool_name != "write_files" && t.tool_name != "list_directory").count()
                            ).dimmed());

                            tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
                        } else if retry_attempt < MAX_RETRIES {
                            retry_attempt += 1;
                            eprintln!("{}", format!("⚠ Response error (attempt {}/{}). Retrying...", retry_attempt, MAX_RETRIES).yellow());
                            tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
                        } else {
                            eprintln!("{}", format!("Error: {}", e).red());
                            if continuation_count >= MAX_CONTINUATIONS {
                                eprintln!("{}", format!("Max continuations ({}) reached. The task is too complex for one request.", MAX_CONTINUATIONS).dimmed());
                            } else {
                                eprintln!("{}", "Max retries reached. The response may be too complex.".dimmed());
                            }
                            eprintln!("{}", "Try breaking your request into smaller parts.".dimmed());
                            break;
                        }
                    } else if err_str.contains("timeout") || err_str.contains("Timeout") {
                        retry_attempt += 1;
                        if retry_attempt < MAX_RETRIES {
                            eprintln!("{}", format!("⚠ Request timed out (attempt {}/{}). Retrying...", retry_attempt, MAX_RETRIES).yellow());
                            tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
                        } else {
                            eprintln!("{}", "Request timed out. Please try again.".red());
                            break;
                        }
                    } else {
                        eprintln!("{}", format!("Error: {}", e).red());
                        if continuation_count > 0 {
                            eprintln!("{}", format!(" (occurred during continuation attempt {})", continuation_count).dimmed());
                        }
                        eprintln!("{}", "Error details for debugging:".dimmed());
                        eprintln!("{}", format!(" - retry_attempt: {}/{}", retry_attempt, MAX_RETRIES).dimmed());
                        eprintln!("{}", format!(" - continuation_count: {}/{}", continuation_count, MAX_CONTINUATIONS).dimmed());
                        break;
                    }
                }
            }
        }
        println!();
    }

    Ok(())
}

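/// Snapshots the tool calls recorded by the display hook as history records,
/// truncating arguments and outputs and marking which calls may be dropped
/// during compaction.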
async fn extract_tool_calls_from_hook(hook: &ToolDisplayHook) -> Vec<ToolCallRecord> {
    let state = hook.state();
    let guard = state.lock().await;

    guard.tool_calls.iter().enumerate().map(|(i, tc)| {
        let result = if tc.is_running {
            "[IN PROGRESS - may need to be re-run]".to_string()
        } else if let Some(output) = &tc.output {
            truncate_string(output, 200)
        } else {
            "completed".to_string()
        };

        ToolCallRecord {
            tool_name: tc.name.clone(),
            args_summary: truncate_string(&tc.args, 100),
            result_summary: result,
            tool_id: Some(format!("tool_{}_{}", tc.name, i)),
            droppable: matches!(
                tc.name.as_str(),
                "read_file" | "list_directory" | "analyze_project"
            ),
        }
    }).collect()
}

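/// Returns any intermediate assistant messages captured by the hook; these are
/// used to remind the model of its last thoughts in the continuation prompt.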
async fn extract_agent_messages_from_hook(hook: &ToolDisplayHook) -> Vec<String> {
    let state = hook.state();
    let guard = state.lock().await;
    guard.agent_messages.clone()
}

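/// Truncates `s` for display, appending `...` when it was cut short.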
fn truncate_string(s: &str, max_len: usize) -> String {
    if s.len() <= max_len {
        s.to_string()
    } else {
        // Back off to the nearest char boundary so slicing multi-byte UTF-8
        // text (e.g. emoji in tool output) cannot panic.
        let mut end = max_len.saturating_sub(3);
        while !s.is_char_boundary(end) {
            end -= 1;
        }
        format!("{}...", &s[..end])
    }
}

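/// Heuristic check for errors that indicate the model's response was cut off
/// mid-stream (typically surfaced as JSON parse failures by the provider SDK).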
fn is_truncation_error(err_str: &str) -> bool {
    err_str.contains("JsonError")
        || err_str.contains("EOF while parsing")
        || err_str.contains("JSON")
        || err_str.contains("unexpected end")
}

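/// Builds a follow-up prompt after an interrupted response: it restates the
/// original task and lists files already read or written, directories listed,
/// other completed actions, interrupted tools, and the agent's last recorded
/// thought, so the model can resume without repeating work.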
fn build_continuation_prompt(
    original_task: &str,
    completed_tools: &[ToolCallRecord],
    agent_thinking: &[String],
) -> String {
    use std::collections::HashSet;

    let mut files_read: HashSet<String> = HashSet::new();
    let mut files_written: HashSet<String> = HashSet::new();
    let mut dirs_listed: HashSet<String> = HashSet::new();
    let mut other_tools: Vec<String> = Vec::new();
    let mut in_progress: Vec<String> = Vec::new();

    for tool in completed_tools {
        let is_in_progress = tool.result_summary.contains("IN PROGRESS");

        if is_in_progress {
            in_progress.push(format!("{}({})", tool.tool_name, tool.args_summary));
            continue;
        }

        match tool.tool_name.as_str() {
            "read_file" => {
                files_read.insert(tool.args_summary.clone());
            }
            "write_file" | "write_files" => {
                files_written.insert(tool.args_summary.clone());
            }
            "list_directory" => {
                dirs_listed.insert(tool.args_summary.clone());
            }
            _ => {
                other_tools.push(format!("{}({})", tool.tool_name, truncate_string(&tool.args_summary, 40)));
            }
        }
    }

    let mut prompt = format!(
        "[CONTINUE] Your previous response was interrupted. DO NOT repeat completed work.\n\n\
         Original task: {}\n",
        truncate_string(original_task, 500)
    );

    if !files_read.is_empty() {
        prompt.push_str("\n== FILES ALREADY READ (do NOT read again) ==\n");
        for file in &files_read {
            prompt.push_str(&format!(" - {}\n", file));
        }
    }

    if !dirs_listed.is_empty() {
        prompt.push_str("\n== DIRECTORIES ALREADY LISTED ==\n");
        for dir in &dirs_listed {
            prompt.push_str(&format!(" - {}\n", dir));
        }
    }

    if !files_written.is_empty() {
        prompt.push_str("\n== FILES ALREADY WRITTEN ==\n");
        for file in &files_written {
            prompt.push_str(&format!(" - {}\n", file));
        }
    }

    if !other_tools.is_empty() {
        prompt.push_str("\n== OTHER COMPLETED ACTIONS ==\n");
        for tool in other_tools.iter().take(20) {
            prompt.push_str(&format!(" - {}\n", tool));
        }
        if other_tools.len() > 20 {
            prompt.push_str(&format!(" ... and {} more\n", other_tools.len() - 20));
        }
    }

    if !in_progress.is_empty() {
        prompt.push_str("\n== INTERRUPTED (may need re-run) ==\n");
        for tool in &in_progress {
            prompt.push_str(&format!(" ⚠ {}\n", tool));
        }
    }

    if !agent_thinking.is_empty() {
        if let Some(last_thought) = agent_thinking.last() {
            prompt.push_str(&format!(
                "\n== YOUR LAST THOUGHTS ==\n\"{}\"\n",
                truncate_string(last_thought, 300)
            ));
        }
    }

    prompt.push_str("\n== INSTRUCTIONS ==\n");
    prompt.push_str("IMPORTANT: Your previous response was too long and got cut off.\n");
    prompt.push_str("1. Do NOT re-read files listed above - they are already in context.\n");
    prompt.push_str("2. If writing a document, write it in SECTIONS - complete one section now, then continue.\n");
    prompt.push_str("3. Keep your response SHORT and focused. Better to complete small chunks than fail on large ones.\n");
    prompt.push_str("4. If the task involves writing a file, START WRITING NOW - don't explain what you'll do.\n");

    prompt
}

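/// Runs a single non-interactive query against the chosen provider and returns
/// the final response text. Mirrors the interactive loop's tool wiring, but
/// without IDE integration, retries, or continuation handling.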
pub async fn run_query(
    project_path: &Path,
    query: &str,
    provider: ProviderType,
    model: Option<String>,
) -> AgentResult<String> {
    use tools::*;

    let project_path_buf = project_path.to_path_buf();
    let preamble = get_system_prompt(project_path, Some(query));
    let is_generation = prompts::is_generation_query(query);

    match provider {
        ProviderType::OpenAI => {
            let client = openai::Client::from_env();
            let model_name = model.as_deref().unwrap_or("gpt-5.2");

            let reasoning_params = if model_name.starts_with("gpt-5") || model_name.starts_with("o1") {
                Some(serde_json::json!({
                    "reasoning": {
                        "effort": "medium",
                        "summary": "detailed"
                    }
                }))
            } else {
                None
            };

            let mut builder = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(HadolintTool::new(project_path_buf.clone()))
                .tool(TerraformFmtTool::new(project_path_buf.clone()))
                .tool(TerraformValidateTool::new(project_path_buf.clone()))
                .tool(TerraformInstallTool::new())
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()));

            if is_generation {
                builder = builder
                    .tool(WriteFileTool::new(project_path_buf.clone()))
                    .tool(WriteFilesTool::new(project_path_buf.clone()))
                    .tool(ShellTool::new(project_path_buf.clone()));
            }

            if let Some(params) = reasoning_params {
                builder = builder.additional_params(params);
            }

            let agent = builder.build();

            agent
                .prompt(query)
                .multi_turn(50)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
        ProviderType::Anthropic => {
            let client = anthropic::Client::from_env();
            let model_name = model.as_deref().unwrap_or("claude-sonnet-4-5-20250929");

            let mut builder = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(4096)
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(HadolintTool::new(project_path_buf.clone()))
                .tool(TerraformFmtTool::new(project_path_buf.clone()))
                .tool(TerraformValidateTool::new(project_path_buf.clone()))
                .tool(TerraformInstallTool::new())
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()));

            if is_generation {
                builder = builder
                    .tool(WriteFileTool::new(project_path_buf.clone()))
                    .tool(WriteFilesTool::new(project_path_buf.clone()))
                    .tool(ShellTool::new(project_path_buf.clone()));
            }

            let agent = builder.build();

            agent
                .prompt(query)
                .multi_turn(50)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
        ProviderType::Bedrock => {
            let client = rig_bedrock::client::Client::from_env();
            let model_name = model.as_deref().unwrap_or("global.anthropic.claude-sonnet-4-5-20250929-v1:0");

            let thinking_params = serde_json::json!({
                "thinking": {
                    "type": "enabled",
                    "budget_tokens": 8000
                }
            });

            let mut builder = client
                .agent(model_name)
                .preamble(&preamble)
                .max_tokens(16000)
                .tool(AnalyzeTool::new(project_path_buf.clone()))
                .tool(SecurityScanTool::new(project_path_buf.clone()))
                .tool(VulnerabilitiesTool::new(project_path_buf.clone()))
                .tool(HadolintTool::new(project_path_buf.clone()))
                .tool(TerraformFmtTool::new(project_path_buf.clone()))
                .tool(TerraformValidateTool::new(project_path_buf.clone()))
                .tool(TerraformInstallTool::new())
                .tool(ReadFileTool::new(project_path_buf.clone()))
                .tool(ListDirectoryTool::new(project_path_buf.clone()));

            if is_generation {
                builder = builder
                    .tool(WriteFileTool::new(project_path_buf.clone()))
                    .tool(WriteFilesTool::new(project_path_buf.clone()))
                    .tool(ShellTool::new(project_path_buf.clone()));
            }

            let agent = builder
                .additional_params(thinking_params)
                .build();

            agent
                .prompt(query)
                .multi_turn(50)
                .await
                .map_err(|e| AgentError::ProviderError(e.to_string()))
        }
    }
}