git_iris/agents/iris.rs

//! Iris Agent - The unified AI agent for Git-Iris operations
//!
//! This agent can handle any Git workflow task through capability-based prompts
//! and multi-turn execution using Rig. One agent to rule them all! ✨

use anyhow::Result;
use rig::agent::{AgentBuilder, PromptResponse};
use rig::completion::CompletionModel;
use schemars::JsonSchema;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::borrow::Cow;
use std::collections::HashMap;
use std::fmt;

// Embed capability TOML files at compile time so they're always available
const CAPABILITY_COMMIT: &str = include_str!("capabilities/commit.toml");
const CAPABILITY_PR: &str = include_str!("capabilities/pr.toml");
const CAPABILITY_REVIEW: &str = include_str!("capabilities/review.toml");
const CAPABILITY_CHANGELOG: &str = include_str!("capabilities/changelog.toml");
const CAPABILITY_RELEASE_NOTES: &str = include_str!("capabilities/release_notes.toml");
const CAPABILITY_CHAT: &str = include_str!("capabilities/chat.toml");
const CAPABILITY_SEMANTIC_BLAME: &str = include_str!("capabilities/semantic_blame.toml");

/// Default preamble for Iris agent
const DEFAULT_PREAMBLE: &str = "\
You are Iris, a helpful AI assistant specialized in Git operations and workflows.

You have access to Git tools, code analysis tools, and powerful sub-agent capabilities for handling large analyses.

**File Access Tools:**
- **file_read** - Read file contents directly. Use `start_line` and `num_lines` for large files.
- **file_analyzer** - Get metadata and structure analysis of files.
- **code_search** - Search for patterns across files. Use sparingly; prefer file_read for known files.

**Sub-Agent Tools:**

1. **parallel_analyze** - Run multiple analysis tasks CONCURRENTLY with independent context windows
   - Best for: Large changesets (>500 lines or >20 files), batch commit analysis
   - Each task runs in its own subagent, preventing context overflow
   - Example: parallel_analyze({ \"tasks\": [\"Analyze auth/ changes for security\", \"Review db/ for performance\", \"Check api/ for breaking changes\"] })

2. **analyze_subagent** - Delegate a single focused task to a sub-agent
   - Best for: Deep dive on specific files or focused analysis

**Best Practices:**
- Use git_diff to get changes first - it includes file content
- Use file_read to read files directly instead of multiple code_search calls
- Use parallel_analyze for large changesets to avoid context overflow";

use crate::agents::provider::{self, DynAgent};
use crate::agents::tools::{GitRepoInfo, ParallelAnalyze, Workspace};

/// Trait for streaming callback to handle real-time response processing
#[async_trait::async_trait]
pub trait StreamingCallback: Send + Sync {
    /// Called when a new chunk of text is received
    async fn on_chunk(
        &self,
        chunk: &str,
        tokens: Option<crate::agents::status::TokenMetrics>,
    ) -> Result<()>;

    /// Called when the response is complete
    async fn on_complete(
        &self,
        full_response: &str,
        final_tokens: crate::agents::status::TokenMetrics,
    ) -> Result<()>;

    /// Called when an error occurs
    async fn on_error(&self, error: &anyhow::Error) -> Result<()>;

    /// Called for status updates
    async fn on_status_update(&self, message: &str) -> Result<()>;
}

/// Unified response type that can hold any structured output
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum StructuredResponse {
    CommitMessage(crate::types::GeneratedMessage),
    PullRequest(crate::types::MarkdownPullRequest),
    Changelog(crate::types::MarkdownChangelog),
    ReleaseNotes(crate::types::MarkdownReleaseNotes),
    /// Markdown-based review (LLM-driven structure)
    MarkdownReview(crate::types::MarkdownReview),
    /// Semantic blame explanation (plain text)
    SemanticBlame(String),
    PlainText(String),
}

impl fmt::Display for StructuredResponse {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            StructuredResponse::CommitMessage(msg) => {
                write!(f, "{}", crate::types::format_commit_message(msg))
            }
            StructuredResponse::PullRequest(pr) => {
                write!(f, "{}", pr.raw_content())
            }
            StructuredResponse::Changelog(cl) => {
                write!(f, "{}", cl.raw_content())
            }
            StructuredResponse::ReleaseNotes(rn) => {
                write!(f, "{}", rn.raw_content())
            }
            StructuredResponse::MarkdownReview(review) => {
                write!(f, "{}", review.format())
            }
            StructuredResponse::SemanticBlame(explanation) => {
                write!(f, "{explanation}")
            }
            StructuredResponse::PlainText(text) => {
                write!(f, "{text}")
            }
        }
    }
}

/// Extract JSON from a potentially verbose response that might contain explanations
fn extract_json_from_response(response: &str) -> Result<String> {
    use crate::agents::debug;

    debug::debug_section("JSON Extraction");

    let trimmed_response = response.trim();

    // First, try parsing the entire response as JSON (for well-behaved responses)
    if trimmed_response.starts_with('{')
        && serde_json::from_str::<serde_json::Value>(trimmed_response).is_ok()
    {
        debug::debug_context_management(
            "Response is pure JSON",
            &format!("{} characters", trimmed_response.len()),
        );
        return Ok(trimmed_response.to_string());
    }

    // Try to find JSON within markdown code blocks
    if let Some(start) = response.find("```json") {
        let content_start = start + "```json".len();
        // Find the closing ``` on its own line (to avoid matching ``` inside JSON strings)
        // First try with newline prefix to find standalone closing marker
        let json_end = if let Some(end) = response[content_start..].find("\n```") {
            // Found it with newline - the JSON ends before the newline
            end
        } else {
            // Fallback: try to find ``` at start of response section or end of string
            response[content_start..]
                .find("```")
                .unwrap_or(response.len() - content_start)
        };

        let json_content = &response[content_start..content_start + json_end];
        let trimmed = json_content.trim().to_string();

        debug::debug_context_management(
            "Found JSON in markdown code block",
            &format!("{} characters", trimmed.len()),
        );

        // Save extracted JSON for debugging
        if let Err(e) = debug::write_debug_artifact("iris_extracted.json", &trimmed) {
            debug::debug_warning(&format!("Failed to write extracted JSON: {}", e));
        }

        debug::debug_json_parse_attempt(&trimmed);
        return Ok(trimmed);
    }

    // Look for JSON objects by finding { and matching }
    let mut brace_count = 0;
    let mut json_start = None;
    let mut json_end = None;

    for (i, ch) in response.char_indices() {
        match ch {
            '{' => {
                if brace_count == 0 {
                    json_start = Some(i);
                }
                brace_count += 1;
            }
            '}' => {
                brace_count -= 1;
                if brace_count == 0 && json_start.is_some() {
                    json_end = Some(i + 1);
                    break;
                }
            }
            _ => {}
        }
    }

    if let (Some(start), Some(end)) = (json_start, json_end) {
        let json_content = &response[start..end];
        debug::debug_json_parse_attempt(json_content);

        // Try to sanitize before validating (control characters in strings)
        let sanitized = sanitize_json_response(json_content);

        // Validate it's actually JSON by attempting to parse it
        let _: serde_json::Value = serde_json::from_str(&sanitized).map_err(|e| {
            debug::debug_json_parse_error(&format!(
                "Found JSON-like content but it's not valid JSON: {}",
                e
            ));
            // Include more context in the error for debugging
            let preview = if json_content.len() > 200 {
                format!("{}...", &json_content[..200])
            } else {
                json_content.to_string()
            };
            anyhow::anyhow!(
                "Found JSON-like content but it's not valid JSON: {}\nPreview: {}",
                e,
                preview
            )
        })?;

        debug::debug_context_management(
            "Found valid JSON object",
            &format!("{} characters", json_content.len()),
        );
        return Ok(sanitized.into_owned());
    }

    // If no JSON found, check if the response is raw markdown that we can wrap
    // This handles cases where the model returns markdown directly without JSON wrapper
    let trimmed = response.trim();
    if trimmed.starts_with('#') || trimmed.starts_with("##") {
        debug::debug_context_management(
            "Detected raw markdown response",
            "Wrapping in JSON structure",
        );
        // Escape the markdown content for JSON and wrap it
        let escaped_content = serde_json::to_string(trimmed)?;
        // escaped_content includes quotes, so we need to use it directly as the value
        let wrapped = format!(r#"{{"content": {}}}"#, escaped_content);
        debug::debug_json_parse_attempt(&wrapped);
        return Ok(wrapped);
    }

    // If no JSON found, return error
    debug::debug_json_parse_error("No valid JSON found in response");
    Err(anyhow::anyhow!("No valid JSON found in response"))
}

/// Some providers (Anthropic) occasionally send literal control characters like newlines
/// inside JSON strings, which violates strict JSON parsing rules. This helper sanitizes
/// those responses by escaping control characters only within string literals while
/// leaving the rest of the payload untouched.
fn sanitize_json_response(raw: &str) -> Cow<'_, str> {
    let mut needs_sanitization = false;
    let mut in_string = false;
    let mut escaped = false;

    for ch in raw.chars() {
        if in_string {
            if escaped {
                escaped = false;
                continue;
            }

            match ch {
                '\\' => escaped = true,
                '"' => in_string = false,
                '\n' | '\r' | '\t' => {
                    needs_sanitization = true;
                    break;
                }
                c if c.is_control() => {
                    needs_sanitization = true;
                    break;
                }
                _ => {}
            }
        } else if ch == '"' {
            in_string = true;
        }
    }

    if !needs_sanitization {
        return Cow::Borrowed(raw);
    }

    let mut sanitized = String::with_capacity(raw.len());
    in_string = false;
    escaped = false;

    for ch in raw.chars() {
        if in_string {
            if escaped {
                sanitized.push(ch);
                escaped = false;
                continue;
            }

            match ch {
                '\\' => {
                    sanitized.push('\\');
                    escaped = true;
                }
                '"' => {
                    sanitized.push('"');
                    in_string = false;
                }
                '\n' => sanitized.push_str("\\n"),
                '\r' => sanitized.push_str("\\r"),
                '\t' => sanitized.push_str("\\t"),
                c if c.is_control() => {
                    use std::fmt::Write as _;
                    let _ = write!(&mut sanitized, "\\u{:04X}", u32::from(c));
                }
                _ => sanitized.push(ch),
            }
        } else {
            sanitized.push(ch);
            if ch == '"' {
                in_string = true;
                escaped = false;
            }
        }
    }

    Cow::Owned(sanitized)
}

/// Parse JSON with schema validation and error recovery
///
/// This function attempts to parse JSON with the following strategy:
/// 1. Try direct parsing (fast path for well-formed responses)
/// 2. If that fails, use the output validator for recovery
/// 3. Log any warnings about recovered issues
fn parse_with_recovery<T>(json_str: &str) -> Result<T>
where
    T: JsonSchema + DeserializeOwned,
{
    use crate::agents::debug as agent_debug;
    use crate::agents::output_validator::validate_and_parse;

    let validation_result = validate_and_parse::<T>(json_str)?;

    // Log recovery warnings
    if validation_result.recovered {
        agent_debug::debug_context_management(
            "JSON recovery applied",
            &format!("{} issues fixed", validation_result.warnings.len()),
        );
        for warning in &validation_result.warnings {
            agent_debug::debug_warning(warning);
        }
    }

    validation_result
        .value
        .ok_or_else(|| anyhow::anyhow!("Failed to parse JSON even after recovery"))
}

/// The unified Iris agent that can handle any Git-Iris task
///
/// Note: This struct is Send + Sync safe - we don't store the client builder;
/// instead, we create it fresh when needed. This allows the agent to be used
/// across async boundaries with `tokio::spawn`.
pub struct IrisAgent {
    provider: String,
    model: String,
    /// Fast model for subagents and simple tasks
    fast_model: Option<String>,
    /// Current capability/task being executed
    current_capability: Option<String>,
    /// Provider configuration
    provider_config: HashMap<String, String>,
    /// Custom preamble
    preamble: Option<String>,
    /// Configuration for features like gitmoji, presets, etc.
    config: Option<crate::config::Config>,
    /// Optional sender for content updates (used in Studio chat mode)
    content_update_sender: Option<crate::agents::tools::ContentUpdateSender>,
    /// Persistent workspace for notes and task tracking (shared across agent invocations)
    workspace: Workspace,
}

impl IrisAgent {
    /// Create a new Iris agent with the given provider and model
    pub fn new(provider: &str, model: &str) -> Result<Self> {
        Ok(Self {
            provider: provider.to_string(),
            model: model.to_string(),
            fast_model: None,
            current_capability: None,
            provider_config: HashMap::new(),
            preamble: None,
            config: None,
            content_update_sender: None,
            workspace: Workspace::new(),
        })
    }

    /// Set the content update sender for Studio chat mode
    ///
    /// When set, the agent will have access to tools for updating
    /// commit messages, PR descriptions, and reviews.
    pub fn set_content_update_sender(&mut self, sender: crate::agents::tools::ContentUpdateSender) {
        self.content_update_sender = Some(sender);
    }

    /// Get the effective fast model (configured or same as main model)
    fn effective_fast_model(&self) -> &str {
        self.fast_model.as_deref().unwrap_or(&self.model)
    }

    /// Build the actual agent for execution
    ///
    /// Uses provider-specific builders (rig-core 0.27+) with enum dispatch for runtime
    /// provider selection. Each provider arm builds both the subagent and main agent
    /// with proper typing.
    fn build_agent(&self) -> Result<DynAgent> {
        use crate::agents::debug_tool::DebugTool;

        let preamble = self.preamble.as_deref().unwrap_or(DEFAULT_PREAMBLE);
        let fast_model = self.effective_fast_model();
        let subagent_timeout = self
            .config
            .as_ref()
            .map_or(120, |c| c.subagent_timeout_secs);

        // Macro to build and configure subagent with core tools
        macro_rules! build_subagent {
            ($builder:expr) => {{
                let builder = $builder
                    .name("analyze_subagent")
                    .description("Delegate focused analysis tasks to a sub-agent with its own context window. Use for analyzing specific files, commits, or code sections independently. The sub-agent has access to Git tools (diff, log, status) and file analysis tools.")
                    .preamble("You are a specialized analysis sub-agent for Iris. Your job is to complete focused analysis tasks and return concise, actionable summaries.

Guidelines:
- Use the available tools to gather information
- Focus only on what's asked - don't expand scope
- Return a clear, structured summary of findings
- Highlight important issues, patterns, or insights
- Keep your response focused and concise")
                    .max_tokens(4096);
                let builder = self.apply_reasoning_defaults(builder);
                crate::attach_core_tools!(builder).build()
            }};
        }

        // Macro to attach main agent tools (excluding subagent which varies by type)
        macro_rules! attach_main_tools {
            ($builder:expr) => {{
                crate::attach_core_tools!($builder)
                    .tool(DebugTool::new(GitRepoInfo))
                    .tool(DebugTool::new(self.workspace.clone()))
                    .tool(DebugTool::new(ParallelAnalyze::with_timeout(
                        &self.provider,
                        fast_model,
                        subagent_timeout,
                    )?))
            }};
        }

        // Macro to optionally attach content update tools
        macro_rules! maybe_attach_update_tools {
            ($builder:expr) => {{
                if let Some(sender) = &self.content_update_sender {
                    use crate::agents::tools::{UpdateCommitTool, UpdatePRTool, UpdateReviewTool};
                    $builder
                        .tool(DebugTool::new(UpdateCommitTool::new(sender.clone())))
                        .tool(DebugTool::new(UpdatePRTool::new(sender.clone())))
                        .tool(DebugTool::new(UpdateReviewTool::new(sender.clone())))
                        .build()
                } else {
                    $builder.build()
                }
            }};
        }

        match self.provider.as_str() {
            "openai" => {
                // Build subagent
                let sub_agent = build_subagent!(provider::openai_builder(fast_model));

                // Build main agent
                let builder = provider::openai_builder(&self.model)
                    .preamble(preamble)
                    .max_tokens(16384);
                let builder = self.apply_reasoning_defaults(builder);
                let builder = attach_main_tools!(builder).tool(sub_agent);
                let agent = maybe_attach_update_tools!(builder);
                Ok(DynAgent::OpenAI(agent))
            }
            "anthropic" => {
                // Build subagent
                let sub_agent = build_subagent!(provider::anthropic_builder(fast_model));

                // Build main agent
                let builder = provider::anthropic_builder(&self.model)
                    .preamble(preamble)
                    .max_tokens(16384);
                let builder = self.apply_reasoning_defaults(builder);
                let builder = attach_main_tools!(builder).tool(sub_agent);
                let agent = maybe_attach_update_tools!(builder);
                Ok(DynAgent::Anthropic(agent))
            }
            "google" | "gemini" => {
                // Build subagent
                let sub_agent = build_subagent!(provider::gemini_builder(fast_model));

                // Build main agent
                let builder = provider::gemini_builder(&self.model)
                    .preamble(preamble)
                    .max_tokens(16384);
                let builder = self.apply_reasoning_defaults(builder);
                let builder = attach_main_tools!(builder).tool(sub_agent);
                let agent = maybe_attach_update_tools!(builder);
                Ok(DynAgent::Gemini(agent))
            }
            _ => Err(anyhow::anyhow!("Unsupported provider: {}", self.provider)),
        }
    }

    fn apply_reasoning_defaults<M>(&self, builder: AgentBuilder<M>) -> AgentBuilder<M>
    where
        M: CompletionModel,
    {
        if self.provider == "openai" && Self::requires_reasoning_effort(&self.model) {
            builder.additional_params(json!({
                "reasoning": {
                    "effort": "low"
                }
            }))
        } else {
            builder
        }
    }

    fn requires_reasoning_effort(model: &str) -> bool {
        let model = model.to_lowercase();
        model.starts_with("gpt-5") || model.starts_with("gpt-4.1") || model.starts_with("o1")
    }

    /// Execute a task using the agent with tools and parse the structured JSON response.
    ///
    /// This is the core method that enables Iris to use tools and generate structured outputs.
    async fn execute_with_agent<T>(&self, system_prompt: &str, user_prompt: &str) -> Result<T>
    where
        T: JsonSchema + for<'a> serde::Deserialize<'a> + serde::Serialize + Send + Sync + 'static,
    {
        use crate::agents::debug;
        use crate::agents::status::IrisPhase;
        use crate::messages::get_capability_message;
        use schemars::schema_for;

        let capability = self.current_capability().unwrap_or("commit");

        debug::debug_phase_change(&format!("AGENT EXECUTION: {}", std::any::type_name::<T>()));

        // Update status - building agent (capability-aware)
        let msg = get_capability_message(capability);
        crate::iris_status_dynamic!(IrisPhase::Planning, msg.text, 2, 4);

        // Build agent with all tools attached
        let agent = self.build_agent()?;
        debug::debug_context_management(
            "Agent built with tools",
            &format!(
                "Provider: {}, Model: {} (fast: {})",
                self.provider,
                self.model,
                self.effective_fast_model()
            ),
        );

        // Create JSON schema for the response type
        let schema = schema_for!(T);
        let schema_json = serde_json::to_string_pretty(&schema)?;
        debug::debug_context_management(
            "JSON schema created",
            &format!("Type: {}", std::any::type_name::<T>()),
        );

        // Enhanced prompt that instructs Iris to use tools and respond with JSON
        let full_prompt = format!(
            "{system_prompt}\n\n{user_prompt}\n\n\
            === CRITICAL: RESPONSE FORMAT ===\n\
            After using the available tools to gather necessary information, you MUST respond with ONLY a valid JSON object.\n\n\
            REQUIRED JSON SCHEMA:\n\
            {schema_json}\n\n\
            CRITICAL INSTRUCTIONS:\n\
            - Return ONLY the raw JSON object - nothing else\n\
            - NO explanations before the JSON\n\
            - NO explanations after the JSON\n\
            - NO markdown code blocks (just raw JSON)\n\
            - NO preamble text like 'Here is the JSON:' or 'Let me generate:'\n\
            - Start your response with {{ and end with }}\n\
            - The JSON must be complete and valid\n\n\
            Your entire response should be ONLY the JSON object."
        );

        debug::debug_llm_request(&full_prompt, Some(16384));

        // Update status - generation phase (capability-aware)
        let gen_msg = get_capability_message(capability);
        crate::iris_status_dynamic!(IrisPhase::Generation, gen_msg.text, 3, 4);

        // Prompt the agent with multi-turn support
        // Set multi_turn to allow the agent to call multiple tools (default is 0 = single-shot)
        // For complex tasks like PRs and release notes, Iris may need many tool calls to analyze all changes
        // The agent knows when to stop, so we give it plenty of room (50 rounds)
        let timer = debug::DebugTimer::start("Agent prompt execution");

        debug::debug_context_management(
            "LLM request",
            "Sending prompt to agent with multi_turn(50)",
        );
        let prompt_response: PromptResponse = agent.prompt_extended(&full_prompt, 50).await?;

        timer.finish();

        // Extract usage stats for debug output
        let usage = &prompt_response.total_usage;
        debug::debug_context_management(
            "Token usage",
            &format!(
                "input: {} | output: {} | total: {}",
                usage.input_tokens, usage.output_tokens, usage.total_tokens
            ),
        );

        let response = &prompt_response.output;
        #[allow(clippy::cast_possible_truncation, clippy::as_conversions)]
        let total_tokens_usize = usage.total_tokens as usize;
        debug::debug_llm_response(
            response,
            std::time::Duration::from_secs(0),
            Some(total_tokens_usize),
        );

        // Update status - synthesis phase
        crate::iris_status_dynamic!(
            IrisPhase::Synthesis,
            "✨ Iris is synthesizing results...",
            4,
            4
        );

        // Extract and parse JSON from the response
        let json_str = extract_json_from_response(response)?;
        let sanitized_json = sanitize_json_response(&json_str);
        let sanitized_ref = sanitized_json.as_ref();

        if matches!(sanitized_json, Cow::Borrowed(_)) {
            debug::debug_json_parse_attempt(sanitized_ref);
        } else {
            debug::debug_context_management(
                "Sanitized JSON response",
                &format!("{} → {} characters", json_str.len(), sanitized_ref.len()),
            );
            debug::debug_json_parse_attempt(sanitized_ref);
        }

        // Use the output validator for robust parsing with error recovery
        let result: T = parse_with_recovery(sanitized_ref)?;

        debug::debug_json_parse_success(std::any::type_name::<T>());

        // Update status - completed
        crate::iris_status_completed!();

        Ok(result)
    }

    /// Inject style instructions into the system prompt based on config and capability
    ///
    /// Key distinction:
    /// - Commits: preset controls format (conventional = no emojis)
    /// - Non-commits (PR, review, changelog, `release_notes`): `use_gitmoji` controls emojis
    fn inject_style_instructions(&self, system_prompt: &mut String, capability: &str) {
        let Some(config) = &self.config else {
            return;
        };

        let preset_name = config.get_effective_preset_name();
        let is_conventional = preset_name == "conventional";
        let is_default_mode = preset_name == "default" || preset_name.is_empty();

        // For commits in default mode with no explicit gitmoji override, use style detection
        let use_style_detection =
            capability == "commit" && is_default_mode && config.gitmoji_override.is_none();

        // Commit emoji: respects preset (conventional = no emoji)
        let commit_emoji = config.use_gitmoji && !is_conventional && !use_style_detection;

        // Output emoji: independent of preset, only respects use_gitmoji setting
        // CLI --gitmoji/--no-gitmoji override is already applied to config.use_gitmoji
        let output_emoji = config.gitmoji_override.unwrap_or(config.use_gitmoji);

        // Inject instruction preset if configured (skip for default mode)
        if !preset_name.is_empty() && !is_default_mode {
            let library = crate::instruction_presets::get_instruction_preset_library();
            if let Some(preset) = library.get_preset(preset_name) {
                tracing::info!("📋 Injecting '{}' preset style instructions", preset_name);
                system_prompt.push_str("\n\n=== STYLE INSTRUCTIONS ===\n");
                system_prompt.push_str(&preset.instructions);
                system_prompt.push('\n');
            } else {
                tracing::warn!("⚠️ Preset '{}' not found in library", preset_name);
            }
        }

        // Handle commit-specific styling (structured JSON output with emoji field)
        if capability == "commit" {
            if use_style_detection {
                tracing::info!("🔍 Using local commit style detection (default mode)");
            } else if commit_emoji {
                system_prompt.push_str("\n\n=== GITMOJI INSTRUCTIONS ===\n");
                system_prompt.push_str("Set the 'emoji' field to a single relevant gitmoji. ");
                system_prompt.push_str(
                    "DO NOT include the emoji in the 'message' or 'title' text - only set the 'emoji' field. ",
                );
                system_prompt.push_str("Choose the most relevant emoji from this list:\n\n");
                system_prompt.push_str(&crate::gitmoji::get_gitmoji_list());
                system_prompt.push_str("\n\nThe emoji should match the primary type of change.");
            } else if is_conventional {
                system_prompt.push_str("\n\n=== CONVENTIONAL COMMITS FORMAT ===\n");
                system_prompt.push_str("IMPORTANT: This uses Conventional Commits format. ");
                system_prompt
                    .push_str("DO NOT include any emojis in the commit message or PR title. ");
                system_prompt.push_str("The 'emoji' field should be null.");
            }
        }

        // Handle non-commit outputs: use output_emoji (independent of preset)
        if capability == "pr" || capability == "review" {
            if output_emoji {
                Self::inject_pr_review_emoji_styling(system_prompt);
            } else {
                Self::inject_no_emoji_styling(system_prompt);
            }
        }

        if capability == "release_notes" && output_emoji {
            Self::inject_release_notes_emoji_styling(system_prompt);
        } else if capability == "release_notes" {
            Self::inject_no_emoji_styling(system_prompt);
        }

        if capability == "changelog" && output_emoji {
            Self::inject_changelog_emoji_styling(system_prompt);
        } else if capability == "changelog" {
            Self::inject_no_emoji_styling(system_prompt);
        }
    }

    fn inject_pr_review_emoji_styling(prompt: &mut String) {
        prompt.push_str("\n\n=== EMOJI STYLING ===\n");
        prompt.push_str("Use emojis to make the output visually scannable and engaging:\n");
        prompt.push_str("- H1 title: ONE gitmoji at the start (✨, 🐛, ♻️, etc.)\n");
        prompt.push_str("- Section headers: Add relevant emojis (🎯 What's New, ⚙️ How It Works, 📋 Commits, ⚠️ Breaking Changes)\n");
        prompt.push_str("- Commit list entries: Include gitmoji where appropriate\n");
        prompt.push_str("- Body text: Keep clean - no scattered emojis within prose\n\n");
        prompt.push_str("Choose from this gitmoji list:\n\n");
        prompt.push_str(&crate::gitmoji::get_gitmoji_list());
    }

    fn inject_release_notes_emoji_styling(prompt: &mut String) {
        prompt.push_str("\n\n=== EMOJI STYLING ===\n");
        prompt.push_str("Use at most one emoji per highlight/section title. No emojis in bullet descriptions, upgrade notes, or metrics. ");
        prompt.push_str("Pick from the approved gitmoji list (e.g., 🌟 Highlights, 🤖 Agents, 🔧 Tooling, 🐛 Fixes, ⚡ Performance). ");
        prompt.push_str("Never sprinkle emojis within sentences or JSON keys.\n\n");
        prompt.push_str(&crate::gitmoji::get_gitmoji_list());
    }

    fn inject_changelog_emoji_styling(prompt: &mut String) {
        prompt.push_str("\n\n=== EMOJI STYLING ===\n");
        prompt.push_str("Section keys must remain plain text (Added/Changed/Deprecated/Removed/Fixed/Security). ");
        prompt.push_str(
            "You may include one emoji within a change description to reinforce meaning. ",
        );
        prompt.push_str(
            "Never add emojis to JSON keys, section names, metrics, or upgrade notes.\n\n",
        );
        prompt.push_str(&crate::gitmoji::get_gitmoji_list());
    }

    fn inject_no_emoji_styling(prompt: &mut String) {
        prompt.push_str("\n\n=== NO EMOJI STYLING ===\n");
        prompt.push_str(
            "DO NOT include any emojis anywhere in the output. Keep all content plain text.",
        );
    }

    /// Execute a task with the given capability and user prompt
    ///
    /// This now automatically uses structured output based on the capability type
    pub async fn execute_task(
        &mut self,
        capability: &str,
        user_prompt: &str,
    ) -> Result<StructuredResponse> {
        use crate::agents::status::IrisPhase;
        use crate::messages::get_capability_message;

        // Show initializing status with a capability-specific message
        let waiting_msg = get_capability_message(capability);
        crate::iris_status_dynamic!(IrisPhase::Initializing, waiting_msg.text, 1, 4);

        // Load the capability config to get both prompt and output type
        let (mut system_prompt, output_type) = self.load_capability_config(capability)?;

        // Inject style instructions (presets, gitmoji, conventional commits)
        self.inject_style_instructions(&mut system_prompt, capability);

        // Set the current capability
        self.current_capability = Some(capability.to_string());

        // Update status - analyzing with agent
        crate::iris_status_dynamic!(
            IrisPhase::Analysis,
            "🔍 Iris is analyzing your changes...",
            2,
            4
        );

        // Use agent with tools for all structured outputs
        // The agent will use tools as needed and respond with JSON
        match output_type.as_str() {
            "GeneratedMessage" => {
                let response = self
                    .execute_with_agent::<crate::types::GeneratedMessage>(
                        &system_prompt,
                        user_prompt,
                    )
                    .await?;
                Ok(StructuredResponse::CommitMessage(response))
            }
            "MarkdownPullRequest" => {
                let response = self
                    .execute_with_agent::<crate::types::MarkdownPullRequest>(
                        &system_prompt,
                        user_prompt,
                    )
                    .await?;
                Ok(StructuredResponse::PullRequest(response))
            }
            "MarkdownChangelog" => {
                let response = self
                    .execute_with_agent::<crate::types::MarkdownChangelog>(
                        &system_prompt,
                        user_prompt,
                    )
                    .await?;
                Ok(StructuredResponse::Changelog(response))
            }
            "MarkdownReleaseNotes" => {
                let response = self
                    .execute_with_agent::<crate::types::MarkdownReleaseNotes>(
                        &system_prompt,
                        user_prompt,
                    )
                    .await?;
                Ok(StructuredResponse::ReleaseNotes(response))
            }
            "MarkdownReview" => {
                let response = self
                    .execute_with_agent::<crate::types::MarkdownReview>(&system_prompt, user_prompt)
                    .await?;
                Ok(StructuredResponse::MarkdownReview(response))
            }
            "SemanticBlame" => {
                // For semantic blame, we want a plain-text response
                let agent = self.build_agent()?;
                let full_prompt = format!("{system_prompt}\n\n{user_prompt}");
                let response = agent.prompt_multi_turn(&full_prompt, 10).await?;
                Ok(StructuredResponse::SemanticBlame(response))
            }
            _ => {
                // Fallback to regular agent for unknown types
                let agent = self.build_agent()?;
                let full_prompt = format!("{system_prompt}\n\n{user_prompt}");
                // Use multi_turn to allow tool calls even for unknown capability types
                let response = agent.prompt_multi_turn(&full_prompt, 50).await?;
                Ok(StructuredResponse::PlainText(response))
            }
        }
    }

    /// Execute a task with streaming, calling the callback with each text chunk
    ///
    /// This enables real-time display of LLM output in the TUI.
    /// The callback receives `(chunk, aggregated_text)` for each delta.
    ///
    /// Returns the final structured response after streaming completes.
    pub async fn execute_task_streaming<F>(
        &mut self,
        capability: &str,
        user_prompt: &str,
        mut on_chunk: F,
    ) -> Result<StructuredResponse>
    where
        F: FnMut(&str, &str) + Send,
    {
        use crate::agents::status::IrisPhase;
        use crate::messages::get_capability_message;
        use futures::StreamExt;
        use rig::agent::MultiTurnStreamItem;
        use rig::streaming::{StreamedAssistantContent, StreamingPrompt};

        // Show initializing status
        let waiting_msg = get_capability_message(capability);
        crate::iris_status_dynamic!(IrisPhase::Initializing, waiting_msg.text, 1, 4);

        // Load the capability config
        let (mut system_prompt, output_type) = self.load_capability_config(capability)?;

        // Inject style instructions
        self.inject_style_instructions(&mut system_prompt, capability);

        // Set current capability
        self.current_capability = Some(capability.to_string());

        // Update status
        crate::iris_status_dynamic!(
            IrisPhase::Analysis,
            "🔍 Iris is analyzing your changes...",
            2,
            4
        );

        // Build the full prompt (simplified for streaming - no JSON schema enforcement)
        let full_prompt = format!(
            "{}\n\n{}\n\n\
            After using the available tools, respond with your analysis in markdown format.\n\
            Keep it clear, well-structured, and informative.",
            system_prompt, user_prompt
        );

        // Update status
        let gen_msg = get_capability_message(capability);
        crate::iris_status_dynamic!(IrisPhase::Generation, gen_msg.text, 3, 4);

        // Macro to consume a stream and aggregate text
        macro_rules! consume_stream {
            ($stream:expr) => {{
                let mut aggregated_text = String::new();
                let mut stream = $stream;
                while let Some(item) = stream.next().await {
                    match item {
                        Ok(MultiTurnStreamItem::StreamAssistantItem(
                            StreamedAssistantContent::Text(text),
                        )) => {
                            aggregated_text.push_str(&text.text);
                            on_chunk(&text.text, &aggregated_text);
                        }
                        Ok(MultiTurnStreamItem::StreamAssistantItem(
                            StreamedAssistantContent::ToolCall(tool_call),
                        )) => {
                            let tool_name = &tool_call.function.name;
                            let reason = format!("Calling {}", tool_name);
                            crate::iris_status_dynamic!(
                                IrisPhase::ToolExecution {
                                    tool_name: tool_name.clone(),
                                    reason: reason.clone()
                                },
                                format!("🔧 {}", reason),
                                3,
                                4
                            );
                        }
                        Ok(MultiTurnStreamItem::FinalResponse(_)) => break,
                        Err(e) => return Err(anyhow::anyhow!("Streaming error: {}", e)),
                        _ => {}
                    }
                }
                aggregated_text
            }};
        }

        // Build and stream per-provider (streaming types are model-specific)
        let aggregated_text = match self.provider.as_str() {
            "openai" => {
                let agent = self.build_openai_agent_for_streaming(&full_prompt)?;
                let stream = agent.stream_prompt(&full_prompt).multi_turn(50).await;
                consume_stream!(stream)
            }
            "anthropic" => {
                let agent = self.build_anthropic_agent_for_streaming(&full_prompt)?;
                let stream = agent.stream_prompt(&full_prompt).multi_turn(50).await;
                consume_stream!(stream)
            }
            "google" | "gemini" => {
                let agent = self.build_gemini_agent_for_streaming(&full_prompt)?;
                let stream = agent.stream_prompt(&full_prompt).multi_turn(50).await;
                consume_stream!(stream)
            }
            _ => return Err(anyhow::anyhow!("Unsupported provider: {}", self.provider)),
        };

        // Update status
        crate::iris_status_dynamic!(
            IrisPhase::Synthesis,
            "✨ Iris is synthesizing results...",
            4,
            4
        );

        let response = Self::text_to_structured_response(&output_type, aggregated_text);
        crate::iris_status_completed!();
        Ok(response)
    }

    /// Convert raw text to the appropriate structured response type
    fn text_to_structured_response(output_type: &str, text: String) -> StructuredResponse {
        match output_type {
            "MarkdownReview" => {
                StructuredResponse::MarkdownReview(crate::types::MarkdownReview { content: text })
            }
            "MarkdownPullRequest" => {
                StructuredResponse::PullRequest(crate::types::MarkdownPullRequest { content: text })
            }
            "MarkdownChangelog" => {
                StructuredResponse::Changelog(crate::types::MarkdownChangelog { content: text })
            }
            "MarkdownReleaseNotes" => {
                StructuredResponse::ReleaseNotes(crate::types::MarkdownReleaseNotes {
                    content: text,
                })
            }
            "SemanticBlame" => StructuredResponse::SemanticBlame(text),
            _ => StructuredResponse::PlainText(text),
        }
    }

    /// Build `OpenAI` agent for streaming (with tools attached)
    fn build_openai_agent_for_streaming(
        &self,
        _prompt: &str,
    ) -> Result<rig::agent::Agent<provider::OpenAIModel>> {
        use crate::agents::debug_tool::DebugTool;

        let fast_model = self.effective_fast_model();
        let subagent_timeout = self
            .config
            .as_ref()
            .map_or(120, |c| c.subagent_timeout_secs);

        // Build subagent
        let sub_agent = crate::attach_core_tools!(
            provider::openai_builder(fast_model)
                .name("analyze_subagent")
                .preamble("You are a specialized analysis sub-agent.")
                .max_tokens(4096)
        )
        .build();

        // Build main agent with tools
        let builder = provider::openai_builder(&self.model)
            .preamble(self.preamble.as_deref().unwrap_or("You are Iris."))
            .max_tokens(16384);

        let builder = crate::attach_core_tools!(builder)
            .tool(DebugTool::new(GitRepoInfo))
            .tool(DebugTool::new(self.workspace.clone()))
            .tool(DebugTool::new(ParallelAnalyze::with_timeout(
                &self.provider,
                fast_model,
                subagent_timeout,
            )?))
            .tool(sub_agent);

        // Conditionally attach content update tools for chat mode
        if let Some(sender) = &self.content_update_sender {
            use crate::agents::tools::{UpdateCommitTool, UpdatePRTool, UpdateReviewTool};
            Ok(builder
                .tool(DebugTool::new(UpdateCommitTool::new(sender.clone())))
                .tool(DebugTool::new(UpdatePRTool::new(sender.clone())))
                .tool(DebugTool::new(UpdateReviewTool::new(sender.clone())))
                .build())
        } else {
            Ok(builder.build())
        }
    }

    /// Build Anthropic agent for streaming (with tools attached)
    fn build_anthropic_agent_for_streaming(
        &self,
        _prompt: &str,
    ) -> Result<rig::agent::Agent<provider::AnthropicModel>> {
        use crate::agents::debug_tool::DebugTool;

        let fast_model = self.effective_fast_model();
        let subagent_timeout = self
            .config
            .as_ref()
            .map_or(120, |c| c.subagent_timeout_secs);

        // Build subagent
        let sub_agent = crate::attach_core_tools!(
            provider::anthropic_builder(fast_model)
                .name("analyze_subagent")
                .preamble("You are a specialized analysis sub-agent.")
                .max_tokens(4096)
        )
        .build();

        // Build main agent with tools
        let builder = provider::anthropic_builder(&self.model)
            .preamble(self.preamble.as_deref().unwrap_or("You are Iris."))
            .max_tokens(16384);

        let builder = crate::attach_core_tools!(builder)
            .tool(DebugTool::new(GitRepoInfo))
            .tool(DebugTool::new(self.workspace.clone()))
            .tool(DebugTool::new(ParallelAnalyze::with_timeout(
                &self.provider,
                fast_model,
                subagent_timeout,
            )?))
            .tool(sub_agent);

        // Conditionally attach content update tools for chat mode
        if let Some(sender) = &self.content_update_sender {
            use crate::agents::tools::{UpdateCommitTool, UpdatePRTool, UpdateReviewTool};
            Ok(builder
                .tool(DebugTool::new(UpdateCommitTool::new(sender.clone())))
                .tool(DebugTool::new(UpdatePRTool::new(sender.clone())))
                .tool(DebugTool::new(UpdateReviewTool::new(sender.clone())))
                .build())
        } else {
            Ok(builder.build())
        }
    }

    /// Build Gemini agent for streaming (with tools attached)
    fn build_gemini_agent_for_streaming(
        &self,
        _prompt: &str,
    ) -> Result<rig::agent::Agent<provider::GeminiModel>> {
        use crate::agents::debug_tool::DebugTool;

        let fast_model = self.effective_fast_model();
        let subagent_timeout = self
            .config
            .as_ref()
            .map_or(120, |c| c.subagent_timeout_secs);

        // Build subagent
        let sub_agent = crate::attach_core_tools!(
            provider::gemini_builder(fast_model)
                .name("analyze_subagent")
                .preamble("You are a specialized analysis sub-agent.")
                .max_tokens(4096)
        )
        .build();

        // Build main agent with tools
        let builder = provider::gemini_builder(&self.model)
            .preamble(self.preamble.as_deref().unwrap_or("You are Iris."))
            .max_tokens(16384);

        let builder = crate::attach_core_tools!(builder)
            .tool(DebugTool::new(GitRepoInfo))
            .tool(DebugTool::new(self.workspace.clone()))
            .tool(DebugTool::new(ParallelAnalyze::with_timeout(
                &self.provider,
                fast_model,
                subagent_timeout,
            )?))
            .tool(sub_agent);

        // Conditionally attach content update tools for chat mode
        if let Some(sender) = &self.content_update_sender {
            use crate::agents::tools::{UpdateCommitTool, UpdatePRTool, UpdateReviewTool};
            Ok(builder
                .tool(DebugTool::new(UpdateCommitTool::new(sender.clone())))
                .tool(DebugTool::new(UpdatePRTool::new(sender.clone())))
                .tool(DebugTool::new(UpdateReviewTool::new(sender.clone())))
                .build())
        } else {
            Ok(builder.build())
        }
    }

    /// Load capability configuration from embedded TOML, returning both prompt and output type
    fn load_capability_config(&self, capability: &str) -> Result<(String, String)> {
        let _ = self; // Keep &self for method syntax consistency
        // Use embedded capability strings - always available regardless of working directory
        let content = match capability {
            "commit" => CAPABILITY_COMMIT,
            "pr" => CAPABILITY_PR,
            "review" => CAPABILITY_REVIEW,
            "changelog" => CAPABILITY_CHANGELOG,
            "release_notes" => CAPABILITY_RELEASE_NOTES,
            "chat" => CAPABILITY_CHAT,
            "semantic_blame" => CAPABILITY_SEMANTIC_BLAME,
            _ => {
                // Return generic prompt for unknown capabilities
                return Ok((
                    format!(
                        "You are helping with a {capability} task. Use the available Git tools to assist the user."
                    ),
                    "PlainText".to_string(),
                ));
            }
        };

        // Parse TOML to extract both task_prompt and output_type
        let parsed: toml::Value = toml::from_str(content)?;

        let task_prompt = parsed
            .get("task_prompt")
            .and_then(|v| v.as_str())
            .ok_or_else(|| anyhow::anyhow!("No task_prompt found in capability file"))?;

        let output_type = parsed
            .get("output_type")
            .and_then(|v| v.as_str())
            .unwrap_or("PlainText")
            .to_string();

        Ok((task_prompt.to_string(), output_type))
    }

    /// Get the current capability being executed
    pub fn current_capability(&self) -> Option<&str> {
        self.current_capability.as_deref()
    }

    /// Simple single-turn execution for basic queries
    pub async fn chat(&self, message: &str) -> Result<String> {
        let agent = self.build_agent()?;
        let response = agent.prompt(message).await?;
        Ok(response)
    }

    /// Set the current capability
    pub fn set_capability(&mut self, capability: &str) {
        self.current_capability = Some(capability.to_string());
    }

    /// Get provider configuration
    pub fn provider_config(&self) -> &HashMap<String, String> {
        &self.provider_config
    }

    /// Set provider configuration
    pub fn set_provider_config(&mut self, config: HashMap<String, String>) {
        self.provider_config = config;
    }

    /// Set custom preamble
    pub fn set_preamble(&mut self, preamble: String) {
        self.preamble = Some(preamble);
    }

    /// Set configuration
    pub fn set_config(&mut self, config: crate::config::Config) {
        self.config = Some(config);
    }

    /// Set fast model for subagents
    pub fn set_fast_model(&mut self, fast_model: String) {
        self.fast_model = Some(fast_model);
    }
}

/// Builder for creating `IrisAgent` instances with different configurations
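///
/// A minimal usage sketch (the provider and model strings below are illustrative
/// placeholders, not project defaults beyond what `new()` sets):
///
/// ```ignore
/// let agent = IrisAgentBuilder::new()
///     .with_provider("anthropic")
///     .with_model("your-model-name")
///     .with_preamble("You are Iris, focused on concise commit messages.")
///     .build()?;
/// ```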
pub struct IrisAgentBuilder {
    provider: String,
    model: String,
    preamble: Option<String>,
}

impl IrisAgentBuilder {
    /// Create a new builder
    pub fn new() -> Self {
        Self {
            provider: "openai".to_string(),
            model: "gpt-4o".to_string(),
            preamble: None,
        }
    }

    /// Set the provider to use
    pub fn with_provider(mut self, provider: impl Into<String>) -> Self {
        self.provider = provider.into();
        self
    }

    /// Set the model to use
    pub fn with_model(mut self, model: impl Into<String>) -> Self {
        self.model = model.into();
        self
    }

    /// Set a custom preamble
    pub fn with_preamble(mut self, preamble: impl Into<String>) -> Self {
        self.preamble = Some(preamble.into());
        self
    }

    /// Build the `IrisAgent`
    pub fn build(self) -> Result<IrisAgent> {
        let mut agent = IrisAgent::new(&self.provider, &self.model)?;

        // Apply custom preamble if provided
        if let Some(preamble) = self.preamble {
            agent.set_preamble(preamble);
        }

        Ok(agent)
    }
}

impl Default for IrisAgentBuilder {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::sanitize_json_response;
    use serde_json::Value;
    use std::borrow::Cow;

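    // A hedged regression sketch (not from the original file): it exercises the
    // markdown-fence path of `extract_json_from_response` above. It assumes the
    // crate's debug helpers called inside that function are safe, best-effort
    // no-ops under `cargo test`.
    #[test]
    fn extract_json_from_response_unwraps_markdown_fences() {
        use super::extract_json_from_response;

        let response = "Here is the result:\n```json\n{\"title\": \"Test\"}\n```\nDone.";
        let extracted = extract_json_from_response(response).expect("extraction should succeed");
        let value: Value = serde_json::from_str(&extracted).expect("extracted JSON should parse");
        assert_eq!(value["title"], "Test");
    }
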
    #[test]
    fn sanitize_json_response_is_noop_for_valid_payloads() {
        let raw = r#"{"title":"Test","description":"All good"}"#;
        let sanitized = sanitize_json_response(raw);
        assert!(matches!(sanitized, Cow::Borrowed(_)));
        serde_json::from_str::<Value>(sanitized.as_ref()).expect("valid JSON");
    }

    #[test]
    fn sanitize_json_response_escapes_literal_newlines() {
        let raw = "{\"description\": \"Line1
Line2\"}";
        let sanitized = sanitize_json_response(raw);
        assert_eq!(sanitized.as_ref(), "{\"description\": \"Line1\\nLine2\"}");
        serde_json::from_str::<Value>(sanitized.as_ref()).expect("json sanitized");
    }
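
    // Two more hedged sketches (not from the original file) covering the other
    // recovery paths in `extract_json_from_response`: brace-matching inside prose,
    // and wrapping a raw markdown reply into a {"content": ...} object.
    #[test]
    fn extract_json_from_response_finds_embedded_object() {
        use super::extract_json_from_response;

        let response =
            "The commit message is ready. {\"message\": \"feat: add parser\", \"emoji\": null} Let me know.";
        let extracted =
            extract_json_from_response(response).expect("embedded object should be found");
        let value: Value = serde_json::from_str(&extracted).expect("embedded JSON should parse");
        assert_eq!(value["message"], "feat: add parser");
    }

    #[test]
    fn extract_json_from_response_wraps_raw_markdown() {
        use super::extract_json_from_response;

        let response = "# Review\n\nEverything looks good.";
        let wrapped = extract_json_from_response(response).expect("markdown should be wrapped");
        let value: Value = serde_json::from_str(&wrapped).expect("wrapped JSON should parse");
        assert!(value["content"]
            .as_str()
            .unwrap_or_default()
            .starts_with("# Review"));
    }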
}