git_iris/agents/iris.rs

//! Iris Agent - The unified AI agent for Git-Iris operations
//!
//! This agent can handle any Git workflow task through capability-based prompts
//! and multi-turn execution using Rig. One agent to rule them all! ✨

use anyhow::Result;
use rig::agent::{AgentBuilder, PromptResponse};
use rig::completion::CompletionModel;
use schemars::JsonSchema;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::borrow::Cow;
use std::collections::HashMap;
use std::fmt;

// Embed capability TOML files at compile time so they're always available
const CAPABILITY_COMMIT: &str = include_str!("capabilities/commit.toml");
const CAPABILITY_PR: &str = include_str!("capabilities/pr.toml");
const CAPABILITY_REVIEW: &str = include_str!("capabilities/review.toml");
const CAPABILITY_CHANGELOG: &str = include_str!("capabilities/changelog.toml");
const CAPABILITY_RELEASE_NOTES: &str = include_str!("capabilities/release_notes.toml");
const CAPABILITY_CHAT: &str = include_str!("capabilities/chat.toml");
const CAPABILITY_SEMANTIC_BLAME: &str = include_str!("capabilities/semantic_blame.toml");

/// Default preamble for the Iris agent
const DEFAULT_PREAMBLE: &str = "\
You are Iris, a helpful AI assistant specialized in Git operations and workflows.

You have access to Git tools, code analysis tools, and powerful sub-agent capabilities for handling large analyses.

**File Access Tools:**
- **file_read** - Read file contents directly. Use `start_line` and `num_lines` for large files.
- **file_analyzer** - Get metadata and structure analysis of files.
- **code_search** - Search for patterns across files. Use sparingly; prefer file_read for known files.

**Sub-Agent Tools:**

1. **parallel_analyze** - Run multiple analysis tasks CONCURRENTLY with independent context windows
   - Best for: Large changesets (>500 lines or >20 files), batch commit analysis
   - Each task runs in its own subagent, preventing context overflow
   - Example: parallel_analyze({ \"tasks\": [\"Analyze auth/ changes for security\", \"Review db/ for performance\", \"Check api/ for breaking changes\"] })

2. **analyze_subagent** - Delegate a single focused task to a sub-agent
   - Best for: Deep dive on specific files or focused analysis

**Best Practices:**
- Use git_diff to get changes first - it includes file content
- Use file_read to read files directly instead of multiple code_search calls
- Use parallel_analyze for large changesets to avoid context overflow";

use crate::agents::provider::{self, DynAgent};
use crate::agents::tools::{GitRepoInfo, ParallelAnalyze, Workspace};

/// Trait for streaming callbacks that handle real-time response processing
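///
/// # Example
///
/// A minimal sketch of an implementor (illustrative only; it assumes `TokenMetrics` is in
/// scope and simply prints chunks to stdout while ignoring token metrics):
///
/// ```ignore
/// struct StdoutCallback;
///
/// #[async_trait::async_trait]
/// impl StreamingCallback for StdoutCallback {
///     async fn on_chunk(&self, chunk: &str, _tokens: Option<TokenMetrics>) -> Result<()> {
///         print!("{chunk}");
///         Ok(())
///     }
///
///     async fn on_complete(&self, _full_response: &str, _final_tokens: TokenMetrics) -> Result<()> {
///         println!();
///         Ok(())
///     }
///
///     async fn on_error(&self, error: &anyhow::Error) -> Result<()> {
///         eprintln!("stream error: {error}");
///         Ok(())
///     }
///
///     async fn on_status_update(&self, message: &str) -> Result<()> {
///         eprintln!("status: {message}");
///         Ok(())
///     }
/// }
/// ```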
#[async_trait::async_trait]
pub trait StreamingCallback: Send + Sync {
    /// Called when a new chunk of text is received
    async fn on_chunk(
        &self,
        chunk: &str,
        tokens: Option<crate::agents::status::TokenMetrics>,
    ) -> Result<()>;

    /// Called when the response is complete
    async fn on_complete(
        &self,
        full_response: &str,
        final_tokens: crate::agents::status::TokenMetrics,
    ) -> Result<()>;

    /// Called when an error occurs
    async fn on_error(&self, error: &anyhow::Error) -> Result<()>;

    /// Called for status updates
    async fn on_status_update(&self, message: &str) -> Result<()>;
}

/// Unified response type that can hold any structured output
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum StructuredResponse {
    CommitMessage(crate::types::GeneratedMessage),
    PullRequest(crate::types::MarkdownPullRequest),
    Changelog(crate::types::MarkdownChangelog),
    ReleaseNotes(crate::types::MarkdownReleaseNotes),
    /// Markdown-based review (LLM-driven structure)
    MarkdownReview(crate::types::MarkdownReview),
    /// Semantic blame explanation (plain text)
    SemanticBlame(String),
    PlainText(String),
}

impl fmt::Display for StructuredResponse {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            StructuredResponse::CommitMessage(msg) => {
                write!(f, "{}", crate::types::format_commit_message(msg))
            }
            StructuredResponse::PullRequest(pr) => {
                write!(f, "{}", pr.raw_content())
            }
            StructuredResponse::Changelog(cl) => {
                write!(f, "{}", cl.raw_content())
            }
            StructuredResponse::ReleaseNotes(rn) => {
                write!(f, "{}", rn.raw_content())
            }
            StructuredResponse::MarkdownReview(review) => {
                write!(f, "{}", review.format())
            }
            StructuredResponse::SemanticBlame(explanation) => {
                write!(f, "{explanation}")
            }
            StructuredResponse::PlainText(text) => {
                write!(f, "{text}")
            }
        }
    }
}

/// Extract JSON from a potentially verbose response that might contain explanations
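///
/// A hedged illustration of the brace-matching fallback (the strings below are made up for
/// the example and are not from a real model response):
///
/// ```ignore
/// let raw = "Sure! Here is the result: {\"title\": \"feat: add parser\"} Hope that helps.";
/// let json = extract_json_from_response(raw)?;
/// assert_eq!(json, "{\"title\": \"feat: add parser\"}");
/// ```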
fn extract_json_from_response(response: &str) -> Result<String> {
    use crate::agents::debug;

    debug::debug_section("JSON Extraction");

    let trimmed_response = response.trim();

    // First, try parsing the entire response as JSON (for well-behaved responses)
    if trimmed_response.starts_with('{')
        && serde_json::from_str::<serde_json::Value>(trimmed_response).is_ok()
    {
        debug::debug_context_management(
            "Response is pure JSON",
            &format!("{} characters", trimmed_response.len()),
        );
        return Ok(trimmed_response.to_string());
    }

    // Try to find JSON within markdown code blocks
    if let Some(start) = response.find("```json") {
        let content_start = start + "```json".len();
        // Find the closing ``` on its own line (to avoid matching ``` inside JSON strings)
        // First try with newline prefix to find standalone closing marker
        let json_end = if let Some(end) = response[content_start..].find("\n```") {
            // Found it with newline - the JSON ends before the newline
            end
        } else {
            // Fallback: try to find ``` at start of response section or end of string
            response[content_start..]
                .find("```")
                .unwrap_or(response.len() - content_start)
        };

        let json_content = &response[content_start..content_start + json_end];
        let trimmed = json_content.trim().to_string();

        debug::debug_context_management(
            "Found JSON in markdown code block",
            &format!("{} characters", trimmed.len()),
        );

        // Save extracted JSON for debugging
        if let Err(e) = debug::write_debug_artifact("iris_extracted.json", &trimmed) {
            debug::debug_warning(&format!("Failed to write extracted JSON: {}", e));
        }

        debug::debug_json_parse_attempt(&trimmed);
        return Ok(trimmed);
    }

    // Look for JSON objects by finding { and matching }
    let mut brace_count = 0;
    let mut json_start = None;
    let mut json_end = None;

    for (i, ch) in response.char_indices() {
        match ch {
            '{' => {
                if brace_count == 0 {
                    json_start = Some(i);
                }
                brace_count += 1;
            }
            '}' => {
                brace_count -= 1;
                if brace_count == 0 && json_start.is_some() {
                    json_end = Some(i + 1);
                    break;
                }
            }
            _ => {}
        }
    }

    if let (Some(start), Some(end)) = (json_start, json_end) {
        let json_content = &response[start..end];
        debug::debug_json_parse_attempt(json_content);

        // Try to sanitize before validating (control characters in strings)
        let sanitized = sanitize_json_response(json_content);

        // Validate it's actually JSON by attempting to parse it
        let _: serde_json::Value = serde_json::from_str(&sanitized).map_err(|e| {
            debug::debug_json_parse_error(&format!(
                "Found JSON-like content but it's not valid JSON: {}",
                e
            ));
            // Include more context in the error for debugging
            let preview = if json_content.len() > 200 {
                format!("{}...", &json_content[..200])
            } else {
                json_content.to_string()
            };
            anyhow::anyhow!(
                "Found JSON-like content but it's not valid JSON: {}\nPreview: {}",
                e,
                preview
            )
        })?;

        debug::debug_context_management(
            "Found valid JSON object",
            &format!("{} characters", json_content.len()),
        );
        return Ok(sanitized.into_owned());
    }

    // If no JSON found, check if the response is raw markdown that we can wrap
    // This handles cases where the model returns markdown directly without JSON wrapper
    let trimmed = response.trim();
    if trimmed.starts_with('#') || trimmed.starts_with("##") {
        debug::debug_context_management(
            "Detected raw markdown response",
            "Wrapping in JSON structure",
        );
        // Escape the markdown content for JSON and wrap it
        let escaped_content = serde_json::to_string(trimmed)?;
        // escaped_content includes quotes, so we need to use it directly as the value
        let wrapped = format!(r#"{{"content": {}}}"#, escaped_content);
        debug::debug_json_parse_attempt(&wrapped);
        return Ok(wrapped);
    }

    // If no JSON found, return error
    debug::debug_json_parse_error("No valid JSON found in response");
    Err(anyhow::anyhow!("No valid JSON found in response"))
}

/// Some providers (Anthropic) occasionally send literal control characters like newlines
/// inside JSON strings, which violates strict JSON parsing rules. This helper sanitizes
/// those responses by escaping control characters only within string literals while
/// leaving the rest of the payload untouched.
fn sanitize_json_response(raw: &str) -> Cow<'_, str> {
    let mut needs_sanitization = false;
    let mut in_string = false;
    let mut escaped = false;

    for ch in raw.chars() {
        if in_string {
            if escaped {
                escaped = false;
                continue;
            }

            match ch {
                '\\' => escaped = true,
                '"' => in_string = false,
                '\n' | '\r' | '\t' => {
                    needs_sanitization = true;
                    break;
                }
                c if c.is_control() => {
                    needs_sanitization = true;
                    break;
                }
                _ => {}
            }
        } else if ch == '"' {
            in_string = true;
        }
    }

    if !needs_sanitization {
        return Cow::Borrowed(raw);
    }

    let mut sanitized = String::with_capacity(raw.len());
    in_string = false;
    escaped = false;

    for ch in raw.chars() {
        if in_string {
            if escaped {
                sanitized.push(ch);
                escaped = false;
                continue;
            }

            match ch {
                '\\' => {
                    sanitized.push('\\');
                    escaped = true;
                }
                '"' => {
                    sanitized.push('"');
                    in_string = false;
                }
                '\n' => sanitized.push_str("\\n"),
                '\r' => sanitized.push_str("\\r"),
                '\t' => sanitized.push_str("\\t"),
                c if c.is_control() => {
                    use std::fmt::Write as _;
                    let _ = write!(&mut sanitized, "\\u{:04X}", u32::from(c));
                }
                _ => sanitized.push(ch),
            }
        } else {
            sanitized.push(ch);
            if ch == '"' {
                in_string = true;
                escaped = false;
            }
        }
    }

    Cow::Owned(sanitized)
}

/// Parse JSON with schema validation and error recovery
///
/// This function attempts to parse JSON with the following strategy:
/// 1. Try direct parsing (fast path for well-formed responses)
/// 2. If that fails, use the output validator for recovery
/// 3. Log any warnings about recovered issues
fn parse_with_recovery<T>(json_str: &str) -> Result<T>
where
    T: JsonSchema + DeserializeOwned,
{
    use crate::agents::debug as agent_debug;
    use crate::agents::output_validator::validate_and_parse;

    let validation_result = validate_and_parse::<T>(json_str)?;

    // Log recovery warnings
    if validation_result.recovered {
        agent_debug::debug_context_management(
            "JSON recovery applied",
            &format!("{} issues fixed", validation_result.warnings.len()),
        );
        for warning in &validation_result.warnings {
            agent_debug::debug_warning(warning);
        }
    }

    validation_result
        .value
        .ok_or_else(|| anyhow::anyhow!("Failed to parse JSON even after recovery"))
}

/// The unified Iris agent that can handle any Git-Iris task
///
/// Note: This struct is Send + Sync safe - we don't store the client builder;
/// instead, we create it fresh when needed. This allows the agent to be used
/// across async boundaries with `tokio::spawn`.
pub struct IrisAgent {
    provider: String,
    model: String,
    /// Fast model for subagents and simple tasks
    fast_model: Option<String>,
    /// Current capability/task being executed
    current_capability: Option<String>,
    /// Provider configuration
    provider_config: HashMap<String, String>,
    /// Custom preamble
    preamble: Option<String>,
    /// Configuration for features like gitmoji, presets, etc.
    config: Option<crate::config::Config>,
    /// Optional sender for content updates (used in Studio chat mode)
    content_update_sender: Option<crate::agents::tools::ContentUpdateSender>,
    /// Persistent workspace for notes and task tracking (shared across agent invocations)
    workspace: Workspace,
}

impl IrisAgent {
    /// Create a new Iris agent with the given provider and model
    pub fn new(provider: &str, model: &str) -> Result<Self> {
        Ok(Self {
            provider: provider.to_string(),
            model: model.to_string(),
            fast_model: None,
            current_capability: None,
            provider_config: HashMap::new(),
            preamble: None,
            config: None,
            content_update_sender: None,
            workspace: Workspace::new(),
        })
    }

    /// Set the content update sender for Studio chat mode
    ///
    /// When set, the agent will have access to tools for updating
    /// commit messages, PR descriptions, and reviews.
    pub fn set_content_update_sender(&mut self, sender: crate::agents::tools::ContentUpdateSender) {
        self.content_update_sender = Some(sender);
    }

    /// Get the effective fast model (configured or same as main model)
    fn effective_fast_model(&self) -> &str {
        self.fast_model.as_deref().unwrap_or(&self.model)
    }

    /// Get the API key for the current provider from config
    fn get_api_key(&self) -> Option<&str> {
        self.config
            .as_ref()
            .and_then(|c| c.get_provider_config(&self.provider))
            .and_then(|pc| pc.api_key_if_set())
    }

    /// Build the actual agent for execution
    ///
    /// Uses provider-specific builders (rig-core 0.27+) with enum dispatch for runtime
    /// provider selection. Each provider arm builds both the subagent and main agent
    /// with proper typing.
    fn build_agent(&self) -> Result<DynAgent> {
        use crate::agents::debug_tool::DebugTool;

        let preamble = self.preamble.as_deref().unwrap_or(DEFAULT_PREAMBLE);
        let fast_model = self.effective_fast_model();
        let api_key = self.get_api_key();
        let subagent_timeout = self
            .config
            .as_ref()
            .map_or(120, |c| c.subagent_timeout_secs);

        // Macro to build and configure the subagent with core tools
        macro_rules! build_subagent {
            ($builder:expr) => {{
                let builder = $builder
                    .name("analyze_subagent")
                    .description("Delegate focused analysis tasks to a sub-agent with its own context window. Use for analyzing specific files, commits, or code sections independently. The sub-agent has access to Git tools (diff, log, status) and file analysis tools.")
                    .preamble("You are a specialized analysis sub-agent for Iris. Your job is to complete focused analysis tasks and return concise, actionable summaries.

Guidelines:
- Use the available tools to gather information
- Focus only on what's asked - don't expand scope
- Return a clear, structured summary of findings
- Highlight important issues, patterns, or insights
- Keep your response focused and concise")
                    .max_tokens(4096);
                let builder = self.apply_reasoning_defaults(builder);
                crate::attach_core_tools!(builder).build()
            }};
        }

        // Macro to attach main agent tools (excluding the subagent, which varies by provider type)
        macro_rules! attach_main_tools {
            ($builder:expr) => {{
                crate::attach_core_tools!($builder)
                    .tool(DebugTool::new(GitRepoInfo))
                    .tool(DebugTool::new(self.workspace.clone()))
                    .tool(DebugTool::new(ParallelAnalyze::with_timeout(
                        &self.provider,
                        fast_model,
                        subagent_timeout,
                        api_key,
                    )?))
            }};
        }

        // Macro to optionally attach content update tools
        macro_rules! maybe_attach_update_tools {
            ($builder:expr) => {{
                if let Some(sender) = &self.content_update_sender {
                    use crate::agents::tools::{UpdateCommitTool, UpdatePRTool, UpdateReviewTool};
                    $builder
                        .tool(DebugTool::new(UpdateCommitTool::new(sender.clone())))
                        .tool(DebugTool::new(UpdatePRTool::new(sender.clone())))
                        .tool(DebugTool::new(UpdateReviewTool::new(sender.clone())))
                        .build()
                } else {
                    $builder.build()
                }
            }};
        }

        match self.provider.as_str() {
            "openai" => {
                // Build subagent
                let sub_agent = build_subagent!(provider::openai_builder(fast_model, api_key)?);

                // Build main agent
                let builder = provider::openai_builder(&self.model, api_key)?
                    .preamble(preamble)
                    .max_tokens(16384);
                let builder = self.apply_reasoning_defaults(builder);
                let builder = attach_main_tools!(builder).tool(sub_agent);
                let agent = maybe_attach_update_tools!(builder);
                Ok(DynAgent::OpenAI(agent))
            }
            "anthropic" => {
                // Build subagent
                let sub_agent = build_subagent!(provider::anthropic_builder(fast_model, api_key)?);

                // Build main agent
                let builder = provider::anthropic_builder(&self.model, api_key)?
                    .preamble(preamble)
                    .max_tokens(16384);
                let builder = self.apply_reasoning_defaults(builder);
                let builder = attach_main_tools!(builder).tool(sub_agent);
                let agent = maybe_attach_update_tools!(builder);
                Ok(DynAgent::Anthropic(agent))
            }
            "google" | "gemini" => {
                // Build subagent
                let sub_agent = build_subagent!(provider::gemini_builder(fast_model, api_key)?);

                // Build main agent
                let builder = provider::gemini_builder(&self.model, api_key)?
                    .preamble(preamble)
                    .max_tokens(16384);
                let builder = self.apply_reasoning_defaults(builder);
                let builder = attach_main_tools!(builder).tool(sub_agent);
                let agent = maybe_attach_update_tools!(builder);
                Ok(DynAgent::Gemini(agent))
            }
            _ => Err(anyhow::anyhow!("Unsupported provider: {}", self.provider)),
        }
    }

    fn apply_reasoning_defaults<M>(&self, builder: AgentBuilder<M>) -> AgentBuilder<M>
    where
        M: CompletionModel,
    {
        if self.provider == "openai" && Self::requires_reasoning_effort(&self.model) {
            builder.additional_params(json!({
                "reasoning": {
                    "effort": "low"
                }
            }))
        } else {
            builder
        }
    }

    fn requires_reasoning_effort(model: &str) -> bool {
        let model = model.to_lowercase();
        model.starts_with("gpt-5") || model.starts_with("gpt-4.1") || model.starts_with("o1")
    }

    /// Execute a task using the agent with tools and parse the structured JSON response.
    ///
    /// This is the core method that enables Iris to use tools and generate structured outputs.
    async fn execute_with_agent<T>(&self, system_prompt: &str, user_prompt: &str) -> Result<T>
    where
        T: JsonSchema + for<'a> serde::Deserialize<'a> + serde::Serialize + Send + Sync + 'static,
    {
        use crate::agents::debug;
        use crate::agents::status::IrisPhase;
        use crate::messages::get_capability_message;
        use schemars::schema_for;

        let capability = self.current_capability().unwrap_or("commit");

        debug::debug_phase_change(&format!("AGENT EXECUTION: {}", std::any::type_name::<T>()));

        // Update status - building agent (capability-aware)
        let msg = get_capability_message(capability);
        crate::iris_status_dynamic!(IrisPhase::Planning, msg.text, 2, 4);

        // Build agent with all tools attached
        let agent = self.build_agent()?;
        debug::debug_context_management(
            "Agent built with tools",
            &format!(
                "Provider: {}, Model: {} (fast: {})",
                self.provider,
                self.model,
                self.effective_fast_model()
            ),
        );

        // Create JSON schema for the response type
        let schema = schema_for!(T);
        let schema_json = serde_json::to_string_pretty(&schema)?;
        debug::debug_context_management(
            "JSON schema created",
            &format!("Type: {}", std::any::type_name::<T>()),
        );

        // Enhanced prompt that instructs Iris to use tools and respond with JSON
        let full_prompt = format!(
            "{system_prompt}\n\n{user_prompt}\n\n\
            === CRITICAL: RESPONSE FORMAT ===\n\
            After using the available tools to gather necessary information, you MUST respond with ONLY a valid JSON object.\n\n\
            REQUIRED JSON SCHEMA:\n\
            {schema_json}\n\n\
            CRITICAL INSTRUCTIONS:\n\
            - Return ONLY the raw JSON object - nothing else\n\
            - NO explanations before the JSON\n\
            - NO explanations after the JSON\n\
            - NO markdown code blocks (just raw JSON)\n\
            - NO preamble text like 'Here is the JSON:' or 'Let me generate:'\n\
            - Start your response with {{ and end with }}\n\
            - The JSON must be complete and valid\n\n\
            Your entire response should be ONLY the JSON object."
        );

        debug::debug_llm_request(&full_prompt, Some(16384));

        // Update status - generation phase (capability-aware)
        let gen_msg = get_capability_message(capability);
        crate::iris_status_dynamic!(IrisPhase::Generation, gen_msg.text, 3, 4);

        // Prompt the agent with multi-turn support
        // Set multi_turn to allow the agent to call multiple tools (default is 0 = single-shot)
        // For complex tasks like PRs and release notes, Iris may need many tool calls to analyze all changes
        // The agent knows when to stop, so we give it plenty of room (50 rounds)
        let timer = debug::DebugTimer::start("Agent prompt execution");

        debug::debug_context_management(
            "LLM request",
            "Sending prompt to agent with multi_turn(50)",
        );
        let prompt_response: PromptResponse = agent.prompt_extended(&full_prompt, 50).await?;

        timer.finish();

        // Extract usage stats for debug output
        let usage = &prompt_response.total_usage;
        debug::debug_context_management(
            "Token usage",
            &format!(
                "input: {} | output: {} | total: {}",
                usage.input_tokens, usage.output_tokens, usage.total_tokens
            ),
        );

        let response = &prompt_response.output;
        #[allow(clippy::cast_possible_truncation, clippy::as_conversions)]
        let total_tokens_usize = usage.total_tokens as usize;
        debug::debug_llm_response(
            response,
            std::time::Duration::from_secs(0),
            Some(total_tokens_usize),
        );

        // Update status - synthesis phase
        crate::iris_status_dynamic!(
            IrisPhase::Synthesis,
            "✨ Iris is synthesizing results...",
            4,
            4
        );

        // Extract and parse JSON from the response
        let json_str = extract_json_from_response(response)?;
        let sanitized_json = sanitize_json_response(&json_str);
        let sanitized_ref = sanitized_json.as_ref();

        if matches!(sanitized_json, Cow::Borrowed(_)) {
            debug::debug_json_parse_attempt(sanitized_ref);
        } else {
            debug::debug_context_management(
                "Sanitized JSON response",
                &format!("{} → {} characters", json_str.len(), sanitized_ref.len()),
            );
            debug::debug_json_parse_attempt(sanitized_ref);
        }

        // Use the output validator for robust parsing with error recovery
        let result: T = parse_with_recovery(sanitized_ref)?;

        debug::debug_json_parse_success(std::any::type_name::<T>());

        // Update status - completed
        crate::iris_status_completed!();

        Ok(result)
    }

    /// Inject style instructions into the system prompt based on config and capability
    ///
    /// Key distinction:
    /// - Commits: preset controls format (conventional = no emojis)
    /// - Non-commits (PR, review, changelog, `release_notes`): `use_gitmoji` controls emojis
    fn inject_style_instructions(&self, system_prompt: &mut String, capability: &str) {
        let Some(config) = &self.config else {
            return;
        };

        let preset_name = config.get_effective_preset_name();
        let is_conventional = preset_name == "conventional";
        let is_default_mode = preset_name == "default" || preset_name.is_empty();

        // For commits in default mode with no explicit gitmoji override, use style detection
        let use_style_detection =
            capability == "commit" && is_default_mode && config.gitmoji_override.is_none();

        // Commit emoji: respects preset (conventional = no emoji)
        let commit_emoji = config.use_gitmoji && !is_conventional && !use_style_detection;

        // Output emoji: independent of preset, only respects use_gitmoji setting
        // CLI --gitmoji/--no-gitmoji override is already applied to config.use_gitmoji
        let output_emoji = config.gitmoji_override.unwrap_or(config.use_gitmoji);

        // Inject instruction preset if configured (skip for default mode)
        if !preset_name.is_empty() && !is_default_mode {
            let library = crate::instruction_presets::get_instruction_preset_library();
            if let Some(preset) = library.get_preset(preset_name) {
                tracing::info!("📋 Injecting '{}' preset style instructions", preset_name);
                system_prompt.push_str("\n\n=== STYLE INSTRUCTIONS ===\n");
                system_prompt.push_str(&preset.instructions);
                system_prompt.push('\n');
            } else {
                tracing::warn!("⚠️ Preset '{}' not found in library", preset_name);
            }
        }

        // Handle commit-specific styling (structured JSON output with emoji field)
        if capability == "commit" {
            if use_style_detection {
                tracing::info!("🔍 Using local commit style detection (default mode)");
            } else if commit_emoji {
                system_prompt.push_str("\n\n=== GITMOJI INSTRUCTIONS ===\n");
                system_prompt.push_str("Set the 'emoji' field to a single relevant gitmoji. ");
                system_prompt.push_str(
                    "DO NOT include the emoji in the 'message' or 'title' text - only set the 'emoji' field. ",
                );
                system_prompt.push_str("Choose the most relevant emoji from this list:\n\n");
                system_prompt.push_str(&crate::gitmoji::get_gitmoji_list());
                system_prompt.push_str("\n\nThe emoji should match the primary type of change.");
            } else if is_conventional {
                system_prompt.push_str("\n\n=== CONVENTIONAL COMMITS FORMAT ===\n");
                system_prompt.push_str("IMPORTANT: This uses Conventional Commits format. ");
                system_prompt
                    .push_str("DO NOT include any emojis in the commit message or PR title. ");
                system_prompt.push_str("The 'emoji' field should be null.");
            }
        }

        // Handle non-commit outputs: use output_emoji (independent of preset)
        if capability == "pr" || capability == "review" {
            if output_emoji {
                Self::inject_pr_review_emoji_styling(system_prompt);
            } else {
                Self::inject_no_emoji_styling(system_prompt);
            }
        }

        if capability == "release_notes" && output_emoji {
            Self::inject_release_notes_emoji_styling(system_prompt);
        } else if capability == "release_notes" {
            Self::inject_no_emoji_styling(system_prompt);
        }

        if capability == "changelog" && output_emoji {
            Self::inject_changelog_emoji_styling(system_prompt);
        } else if capability == "changelog" {
            Self::inject_no_emoji_styling(system_prompt);
        }
    }

    fn inject_pr_review_emoji_styling(prompt: &mut String) {
        prompt.push_str("\n\n=== EMOJI STYLING ===\n");
        prompt.push_str("Use emojis to make the output visually scannable and engaging:\n");
        prompt.push_str("- H1 title: ONE gitmoji at the start (✨, 🐛, ♻️, etc.)\n");
        prompt.push_str("- Section headers: Add relevant emojis (🎯 What's New, ⚙️ How It Works, 📋 Commits, ⚠️ Breaking Changes)\n");
        prompt.push_str("- Commit list entries: Include gitmoji where appropriate\n");
        prompt.push_str("- Body text: Keep clean - no scattered emojis within prose\n\n");
        prompt.push_str("Choose from this gitmoji list:\n\n");
        prompt.push_str(&crate::gitmoji::get_gitmoji_list());
    }

    fn inject_release_notes_emoji_styling(prompt: &mut String) {
        prompt.push_str("\n\n=== EMOJI STYLING ===\n");
        prompt.push_str("Use at most one emoji per highlight/section title. No emojis in bullet descriptions, upgrade notes, or metrics. ");
        prompt.push_str("Pick from the approved gitmoji list (e.g., 🌟 Highlights, 🤖 Agents, 🔧 Tooling, 🐛 Fixes, ⚡ Performance). ");
        prompt.push_str("Never sprinkle emojis within sentences or JSON keys.\n\n");
        prompt.push_str(&crate::gitmoji::get_gitmoji_list());
    }

    fn inject_changelog_emoji_styling(prompt: &mut String) {
        prompt.push_str("\n\n=== EMOJI STYLING ===\n");
        prompt.push_str("Section keys must remain plain text (Added/Changed/Deprecated/Removed/Fixed/Security). ");
        prompt.push_str(
            "You may include one emoji within a change description to reinforce meaning. ",
        );
        prompt.push_str(
            "Never add emojis to JSON keys, section names, metrics, or upgrade notes.\n\n",
        );
        prompt.push_str(&crate::gitmoji::get_gitmoji_list());
    }

    fn inject_no_emoji_styling(prompt: &mut String) {
        prompt.push_str("\n\n=== NO EMOJI STYLING ===\n");
        prompt.push_str(
            "DO NOT include any emojis anywhere in the output. Keep all content plain text.",
        );
    }

    /// Execute a task with the given capability and user prompt
    ///
    /// This now automatically uses structured output based on the capability type
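    ///
    /// # Example
    ///
    /// A hedged sketch of typical usage (assumes a configured provider, model, API key, and
    /// an in-scope `config` value):
    ///
    /// ```ignore
    /// let mut iris = IrisAgent::new("openai", "gpt-4o")?;
    /// iris.set_config(config);
    /// let response = iris
    ///     .execute_task("commit", "Generate a commit message for the staged changes")
    ///     .await?;
    /// println!("{response}"); // StructuredResponse implements Display
    /// ```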
    pub async fn execute_task(
        &mut self,
        capability: &str,
        user_prompt: &str,
    ) -> Result<StructuredResponse> {
        use crate::agents::status::IrisPhase;
        use crate::messages::get_capability_message;

        // Show initializing status with a capability-specific message
        let waiting_msg = get_capability_message(capability);
        crate::iris_status_dynamic!(IrisPhase::Initializing, waiting_msg.text, 1, 4);

        // Load the capability config to get both prompt and output type
        let (mut system_prompt, output_type) = self.load_capability_config(capability)?;

        // Inject style instructions (presets, gitmoji, conventional commits)
        self.inject_style_instructions(&mut system_prompt, capability);

        // Set the current capability
        self.current_capability = Some(capability.to_string());

        // Update status - analyzing with agent
        crate::iris_status_dynamic!(
            IrisPhase::Analysis,
            "🔍 Iris is analyzing your changes...",
            2,
            4
        );

        // Use agent with tools for all structured outputs
        // The agent will use tools as needed and respond with JSON
        match output_type.as_str() {
            "GeneratedMessage" => {
                let response = self
                    .execute_with_agent::<crate::types::GeneratedMessage>(
                        &system_prompt,
                        user_prompt,
                    )
                    .await?;
                Ok(StructuredResponse::CommitMessage(response))
            }
            "MarkdownPullRequest" => {
                let response = self
                    .execute_with_agent::<crate::types::MarkdownPullRequest>(
                        &system_prompt,
                        user_prompt,
                    )
                    .await?;
                Ok(StructuredResponse::PullRequest(response))
            }
            "MarkdownChangelog" => {
                let response = self
                    .execute_with_agent::<crate::types::MarkdownChangelog>(
                        &system_prompt,
                        user_prompt,
                    )
                    .await?;
                Ok(StructuredResponse::Changelog(response))
            }
            "MarkdownReleaseNotes" => {
                let response = self
                    .execute_with_agent::<crate::types::MarkdownReleaseNotes>(
                        &system_prompt,
                        user_prompt,
                    )
                    .await?;
                Ok(StructuredResponse::ReleaseNotes(response))
            }
            "MarkdownReview" => {
                let response = self
                    .execute_with_agent::<crate::types::MarkdownReview>(&system_prompt, user_prompt)
                    .await?;
                Ok(StructuredResponse::MarkdownReview(response))
            }
            "SemanticBlame" => {
                // For semantic blame, we want plain text response
                let agent = self.build_agent()?;
                let full_prompt = format!("{system_prompt}\n\n{user_prompt}");
                let response = agent.prompt_multi_turn(&full_prompt, 10).await?;
                Ok(StructuredResponse::SemanticBlame(response))
            }
            _ => {
                // Fallback to regular agent for unknown types
                let agent = self.build_agent()?;
                let full_prompt = format!("{system_prompt}\n\n{user_prompt}");
                // Use multi_turn to allow tool calls even for unknown capability types
                let response = agent.prompt_multi_turn(&full_prompt, 50).await?;
                Ok(StructuredResponse::PlainText(response))
            }
        }
    }

    /// Execute a task with streaming, calling the callback with each text chunk
    ///
    /// This enables real-time display of LLM output in the TUI.
    /// The callback receives `(chunk, aggregated_text)` for each delta.
    ///
    /// Returns the final structured response after streaming completes.
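    ///
    /// # Example
    ///
    /// A hedged sketch (the prompt text is illustrative; assumes a configured agent):
    ///
    /// ```ignore
    /// let response = iris
    ///     .execute_task_streaming("review", "Review the staged changes", |chunk, _aggregated| {
    ///         print!("{chunk}"); // stream deltas straight to the terminal
    ///     })
    ///     .await?;
    /// ```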
    pub async fn execute_task_streaming<F>(
        &mut self,
        capability: &str,
        user_prompt: &str,
        mut on_chunk: F,
    ) -> Result<StructuredResponse>
    where
        F: FnMut(&str, &str) + Send,
    {
        use crate::agents::status::IrisPhase;
        use crate::messages::get_capability_message;
        use futures::StreamExt;
        use rig::agent::MultiTurnStreamItem;
        use rig::streaming::{StreamedAssistantContent, StreamingPrompt};

        // Show initializing status
        let waiting_msg = get_capability_message(capability);
        crate::iris_status_dynamic!(IrisPhase::Initializing, waiting_msg.text, 1, 4);

        // Load the capability config
        let (mut system_prompt, output_type) = self.load_capability_config(capability)?;

        // Inject style instructions
        self.inject_style_instructions(&mut system_prompt, capability);

        // Set current capability
        self.current_capability = Some(capability.to_string());

        // Update status
        crate::iris_status_dynamic!(
            IrisPhase::Analysis,
            "🔍 Iris is analyzing your changes...",
            2,
            4
        );

        // Build the full prompt (simplified for streaming - no JSON schema enforcement)
        let full_prompt = format!(
            "{}\n\n{}\n\n\
            After using the available tools, respond with your analysis in markdown format.\n\
            Keep it clear, well-structured, and informative.",
            system_prompt, user_prompt
        );

        // Update status
        let gen_msg = get_capability_message(capability);
        crate::iris_status_dynamic!(IrisPhase::Generation, gen_msg.text, 3, 4);

        // Macro to consume a stream and aggregate text
        macro_rules! consume_stream {
            ($stream:expr) => {{
                let mut aggregated_text = String::new();
                let mut stream = $stream;
                while let Some(item) = stream.next().await {
                    match item {
                        Ok(MultiTurnStreamItem::StreamAssistantItem(
                            StreamedAssistantContent::Text(text),
                        )) => {
                            aggregated_text.push_str(&text.text);
                            on_chunk(&text.text, &aggregated_text);
                        }
                        Ok(MultiTurnStreamItem::StreamAssistantItem(
                            StreamedAssistantContent::ToolCall(tool_call),
                        )) => {
                            let tool_name = &tool_call.function.name;
                            let reason = format!("Calling {}", tool_name);
                            crate::iris_status_dynamic!(
                                IrisPhase::ToolExecution {
                                    tool_name: tool_name.clone(),
                                    reason: reason.clone()
                                },
                                format!("🔧 {}", reason),
                                3,
                                4
                            );
                        }
                        Ok(MultiTurnStreamItem::FinalResponse(_)) => break,
                        Err(e) => return Err(anyhow::anyhow!("Streaming error: {}", e)),
                        _ => {}
                    }
                }
                aggregated_text
            }};
        }

        // Build and stream per-provider (streaming types are model-specific)
        let aggregated_text = match self.provider.as_str() {
            "openai" => {
                let agent = self.build_openai_agent_for_streaming(&full_prompt)?;
                let stream = agent.stream_prompt(&full_prompt).multi_turn(50).await;
                consume_stream!(stream)
            }
            "anthropic" => {
                let agent = self.build_anthropic_agent_for_streaming(&full_prompt)?;
                let stream = agent.stream_prompt(&full_prompt).multi_turn(50).await;
                consume_stream!(stream)
            }
            "google" | "gemini" => {
                let agent = self.build_gemini_agent_for_streaming(&full_prompt)?;
                let stream = agent.stream_prompt(&full_prompt).multi_turn(50).await;
                consume_stream!(stream)
            }
            _ => return Err(anyhow::anyhow!("Unsupported provider: {}", self.provider)),
        };

        // Update status
        crate::iris_status_dynamic!(
            IrisPhase::Synthesis,
            "✨ Iris is synthesizing results...",
            4,
            4
        );

        let response = Self::text_to_structured_response(&output_type, aggregated_text);
        crate::iris_status_completed!();
        Ok(response)
    }

    /// Convert raw text to the appropriate structured response type
    fn text_to_structured_response(output_type: &str, text: String) -> StructuredResponse {
        match output_type {
            "MarkdownReview" => {
                StructuredResponse::MarkdownReview(crate::types::MarkdownReview { content: text })
            }
            "MarkdownPullRequest" => {
                StructuredResponse::PullRequest(crate::types::MarkdownPullRequest { content: text })
            }
            "MarkdownChangelog" => {
                StructuredResponse::Changelog(crate::types::MarkdownChangelog { content: text })
            }
            "MarkdownReleaseNotes" => {
                StructuredResponse::ReleaseNotes(crate::types::MarkdownReleaseNotes {
                    content: text,
                })
            }
            "SemanticBlame" => StructuredResponse::SemanticBlame(text),
            _ => StructuredResponse::PlainText(text),
        }
    }

    /// Build `OpenAI` agent for streaming (with tools attached)
    fn build_openai_agent_for_streaming(
        &self,
        _prompt: &str,
    ) -> Result<rig::agent::Agent<provider::OpenAIModel>> {
        use crate::agents::debug_tool::DebugTool;

        let fast_model = self.effective_fast_model();
        let api_key = self.get_api_key();
        let subagent_timeout = self
            .config
            .as_ref()
            .map_or(120, |c| c.subagent_timeout_secs);

        // Build subagent
        let sub_agent = crate::attach_core_tools!(
            provider::openai_builder(fast_model, api_key)?
                .name("analyze_subagent")
                .preamble("You are a specialized analysis sub-agent.")
                .max_tokens(4096)
        )
        .build();

        // Build main agent with tools
        let builder = provider::openai_builder(&self.model, api_key)?
            .preamble(self.preamble.as_deref().unwrap_or("You are Iris."))
            .max_tokens(16384);

        let builder = crate::attach_core_tools!(builder)
            .tool(DebugTool::new(GitRepoInfo))
            .tool(DebugTool::new(self.workspace.clone()))
            .tool(DebugTool::new(ParallelAnalyze::with_timeout(
                &self.provider,
                fast_model,
                subagent_timeout,
                api_key,
            )?))
            .tool(sub_agent);

        // Conditionally attach content update tools for chat mode
        if let Some(sender) = &self.content_update_sender {
            use crate::agents::tools::{UpdateCommitTool, UpdatePRTool, UpdateReviewTool};
            Ok(builder
                .tool(DebugTool::new(UpdateCommitTool::new(sender.clone())))
                .tool(DebugTool::new(UpdatePRTool::new(sender.clone())))
                .tool(DebugTool::new(UpdateReviewTool::new(sender.clone())))
                .build())
        } else {
            Ok(builder.build())
        }
    }

    /// Build Anthropic agent for streaming (with tools attached)
    fn build_anthropic_agent_for_streaming(
        &self,
        _prompt: &str,
    ) -> Result<rig::agent::Agent<provider::AnthropicModel>> {
        use crate::agents::debug_tool::DebugTool;

        let fast_model = self.effective_fast_model();
        let api_key = self.get_api_key();
        let subagent_timeout = self
            .config
            .as_ref()
            .map_or(120, |c| c.subagent_timeout_secs);

        // Build subagent
        let sub_agent = crate::attach_core_tools!(
            provider::anthropic_builder(fast_model, api_key)?
                .name("analyze_subagent")
                .preamble("You are a specialized analysis sub-agent.")
                .max_tokens(4096)
        )
        .build();

        // Build main agent with tools
        let builder = provider::anthropic_builder(&self.model, api_key)?
            .preamble(self.preamble.as_deref().unwrap_or("You are Iris."))
            .max_tokens(16384);

        let builder = crate::attach_core_tools!(builder)
            .tool(DebugTool::new(GitRepoInfo))
            .tool(DebugTool::new(self.workspace.clone()))
            .tool(DebugTool::new(ParallelAnalyze::with_timeout(
                &self.provider,
                fast_model,
                subagent_timeout,
                api_key,
            )?))
            .tool(sub_agent);

        // Conditionally attach content update tools for chat mode
        if let Some(sender) = &self.content_update_sender {
            use crate::agents::tools::{UpdateCommitTool, UpdatePRTool, UpdateReviewTool};
            Ok(builder
                .tool(DebugTool::new(UpdateCommitTool::new(sender.clone())))
                .tool(DebugTool::new(UpdatePRTool::new(sender.clone())))
                .tool(DebugTool::new(UpdateReviewTool::new(sender.clone())))
                .build())
        } else {
            Ok(builder.build())
        }
    }

    /// Build Gemini agent for streaming (with tools attached)
    fn build_gemini_agent_for_streaming(
        &self,
        _prompt: &str,
    ) -> Result<rig::agent::Agent<provider::GeminiModel>> {
        use crate::agents::debug_tool::DebugTool;

        let fast_model = self.effective_fast_model();
        let api_key = self.get_api_key();
        let subagent_timeout = self
            .config
            .as_ref()
            .map_or(120, |c| c.subagent_timeout_secs);

        // Build subagent
        let sub_agent = crate::attach_core_tools!(
            provider::gemini_builder(fast_model, api_key)?
                .name("analyze_subagent")
                .preamble("You are a specialized analysis sub-agent.")
                .max_tokens(4096)
        )
        .build();

        // Build main agent with tools
        let builder = provider::gemini_builder(&self.model, api_key)?
            .preamble(self.preamble.as_deref().unwrap_or("You are Iris."))
            .max_tokens(16384);

        let builder = crate::attach_core_tools!(builder)
            .tool(DebugTool::new(GitRepoInfo))
            .tool(DebugTool::new(self.workspace.clone()))
            .tool(DebugTool::new(ParallelAnalyze::with_timeout(
                &self.provider,
                fast_model,
                subagent_timeout,
                api_key,
            )?))
            .tool(sub_agent);

        // Conditionally attach content update tools for chat mode
        if let Some(sender) = &self.content_update_sender {
            use crate::agents::tools::{UpdateCommitTool, UpdatePRTool, UpdateReviewTool};
            Ok(builder
                .tool(DebugTool::new(UpdateCommitTool::new(sender.clone())))
                .tool(DebugTool::new(UpdatePRTool::new(sender.clone())))
                .tool(DebugTool::new(UpdateReviewTool::new(sender.clone())))
                .build())
        } else {
            Ok(builder.build())
        }
    }

    /// Load capability configuration from embedded TOML, returning both prompt and output type
    fn load_capability_config(&self, capability: &str) -> Result<(String, String)> {
        let _ = self; // Keep &self for method syntax consistency
        // Use embedded capability strings - always available regardless of working directory
        let content = match capability {
            "commit" => CAPABILITY_COMMIT,
            "pr" => CAPABILITY_PR,
            "review" => CAPABILITY_REVIEW,
            "changelog" => CAPABILITY_CHANGELOG,
            "release_notes" => CAPABILITY_RELEASE_NOTES,
            "chat" => CAPABILITY_CHAT,
            "semantic_blame" => CAPABILITY_SEMANTIC_BLAME,
            _ => {
                // Return a generic prompt for unknown capabilities
                return Ok((
                    format!(
                        "You are helping with a {capability} task. Use the available Git tools to assist the user."
                    ),
                    "PlainText".to_string(),
                ));
            }
        };

        // Parse TOML to extract both task_prompt and output_type
        let parsed: toml::Value = toml::from_str(content)?;

        let task_prompt = parsed
            .get("task_prompt")
            .and_then(|v| v.as_str())
            .ok_or_else(|| anyhow::anyhow!("No task_prompt found in capability file"))?;

        let output_type = parsed
            .get("output_type")
            .and_then(|v| v.as_str())
            .unwrap_or("PlainText")
            .to_string();

        Ok((task_prompt.to_string(), output_type))
    }

    /// Get the current capability being executed
    pub fn current_capability(&self) -> Option<&str> {
        self.current_capability.as_deref()
    }

    /// Simple single-turn execution for basic queries
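    ///
    /// A hedged sketch (the question is illustrative):
    ///
    /// ```ignore
    /// let reply = iris.chat("What does the latest commit change?").await?;
    /// println!("{reply}");
    /// ```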
    pub async fn chat(&self, message: &str) -> Result<String> {
        let agent = self.build_agent()?;
        let response = agent.prompt(message).await?;
        Ok(response)
    }

    /// Set the current capability
    pub fn set_capability(&mut self, capability: &str) {
        self.current_capability = Some(capability.to_string());
    }

    /// Get provider configuration
    pub fn provider_config(&self) -> &HashMap<String, String> {
        &self.provider_config
    }

    /// Set provider configuration
    pub fn set_provider_config(&mut self, config: HashMap<String, String>) {
        self.provider_config = config;
    }

    /// Set custom preamble
    pub fn set_preamble(&mut self, preamble: String) {
        self.preamble = Some(preamble);
    }

    /// Set configuration
    pub fn set_config(&mut self, config: crate::config::Config) {
        self.config = Some(config);
    }

    /// Set fast model for subagents
    pub fn set_fast_model(&mut self, fast_model: String) {
        self.fast_model = Some(fast_model);
    }
}

/// Builder for creating `IrisAgent` instances with different configurations
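///
/// # Example
///
/// A hedged sketch; the provider and model strings are placeholders:
///
/// ```ignore
/// let agent = IrisAgentBuilder::new()
///     .with_provider("anthropic")
///     .with_model("claude-sonnet")
///     .with_preamble("You are Iris, focused on concise reviews.")
///     .build()?;
/// ```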
pub struct IrisAgentBuilder {
    provider: String,
    model: String,
    preamble: Option<String>,
}

impl IrisAgentBuilder {
    /// Create a new builder
    pub fn new() -> Self {
        Self {
            provider: "openai".to_string(),
            model: "gpt-4o".to_string(),
            preamble: None,
        }
    }

    /// Set the provider to use
    pub fn with_provider(mut self, provider: impl Into<String>) -> Self {
        self.provider = provider.into();
        self
    }

    /// Set the model to use
    pub fn with_model(mut self, model: impl Into<String>) -> Self {
        self.model = model.into();
        self
    }

    /// Set a custom preamble
    pub fn with_preamble(mut self, preamble: impl Into<String>) -> Self {
        self.preamble = Some(preamble.into());
        self
    }

    /// Build the `IrisAgent`
    pub fn build(self) -> Result<IrisAgent> {
        let mut agent = IrisAgent::new(&self.provider, &self.model)?;

        // Apply custom preamble if provided
        if let Some(preamble) = self.preamble {
            agent.set_preamble(preamble);
        }

        Ok(agent)
    }
}

impl Default for IrisAgentBuilder {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::sanitize_json_response;
    use serde_json::Value;
    use std::borrow::Cow;

    #[test]
    fn sanitize_json_response_is_noop_for_valid_payloads() {
        let raw = r#"{"title":"Test","description":"All good"}"#;
        let sanitized = sanitize_json_response(raw);
        assert!(matches!(sanitized, Cow::Borrowed(_)));
        serde_json::from_str::<Value>(sanitized.as_ref()).expect("valid JSON");
    }

    #[test]
    fn sanitize_json_response_escapes_literal_newlines() {
        let raw = "{\"description\": \"Line1
Line2\"}";
        let sanitized = sanitize_json_response(raw);
        assert_eq!(sanitized.as_ref(), "{\"description\": \"Line1\\nLine2\"}");
        serde_json::from_str::<Value>(sanitized.as_ref()).expect("json sanitized");
    }
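
    // The tests below are illustrative additions covering pure helpers defined in this module;
    // they exercise only local code paths and assume no provider, network, or filesystem access.

    #[test]
    fn extract_json_from_response_finds_embedded_object() {
        // Brace-matching fallback: a JSON object surrounded by conversational text.
        let raw = "Sure! Here is the result: {\"title\": \"feat: add parser\"} Hope that helps.";
        let json = super::extract_json_from_response(raw).expect("embedded JSON extracted");
        let value: Value = serde_json::from_str(&json).expect("valid JSON");
        assert_eq!(value["title"], "feat: add parser");
    }

    #[test]
    fn extract_json_from_response_wraps_raw_markdown() {
        // Raw markdown with no JSON at all gets wrapped into a {"content": ...} object.
        let raw = "## Summary\nLooks good overall.";
        let wrapped = super::extract_json_from_response(raw).expect("markdown wrapped");
        let value: Value = serde_json::from_str(&wrapped).expect("valid JSON");
        assert_eq!(value["content"], "## Summary\nLooks good overall.");
    }

    #[test]
    fn requires_reasoning_effort_matches_expected_model_prefixes() {
        assert!(super::IrisAgent::requires_reasoning_effort("gpt-5-mini"));
        assert!(super::IrisAgent::requires_reasoning_effort("o1-preview"));
        assert!(!super::IrisAgent::requires_reasoning_effort("gpt-4o"));
    }

    #[test]
    fn text_to_structured_response_passes_semantic_blame_through() {
        let response = super::IrisAgent::text_to_structured_response(
            "SemanticBlame",
            "Added to handle retry logic".to_string(),
        );
        assert_eq!(response.to_string(), "Added to handle retry logic");
    }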
1359}