use anyhow::Result;
use rig::agent::{AgentBuilder, PromptResponse};
use rig::completion::CompletionModel;
use schemars::JsonSchema;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use std::collections::HashMap;
use std::fmt;
/// Builds a provider-specific streaming agent.
///
/// This is a macro (not a function) because each provider's `AgentBuilder<M>`
/// is a distinct concrete type. It expands to a `Result<Agent<M>>` expression:
/// first a focused analysis sub-agent on `$fast_model`, then the main agent on
/// `$self.model` with the core/debug tools attached, optionally adding the
/// content-update tools when `$self.content_update_sender` is set.
macro_rules! build_streaming_agent {
    ($self:expr, $builder_fn:path, $fast_model:expr, $api_key:expr, $subagent_timeout:expr) => {{
        use crate::agents::debug_tool::DebugTool;
        // Sub-agent: handles delegated analysis tasks on the (cheaper) fast model.
        let sub_builder = $builder_fn($fast_model, $api_key)?
            .name("analyze_subagent")
            .preamble("You are a specialized analysis sub-agent.");
        let sub_builder = $self.apply_completion_params(
            sub_builder,
            $fast_model,
            4096, // smaller output budget than the main agent's 16384
            CompletionProfile::Subagent,
        )?;
        let sub_agent = crate::attach_core_tools!(sub_builder).build();
        // Main agent: full preamble plus the sub-agent exposed as a tool.
        let builder = $builder_fn(&$self.model, $api_key)?
            .preamble($self.preamble.as_deref().unwrap_or("You are Iris."));
        let builder = $self.apply_completion_params(
            builder,
            &$self.model,
            16384,
            CompletionProfile::MainAgent,
        )?;
        let builder = crate::attach_core_tools!(builder)
            .tool(DebugTool::new(GitRepoInfo))
            .tool(DebugTool::new($self.workspace.clone()))
            .tool(DebugTool::new(ParallelAnalyze::with_timeout(
                &$self.provider,
                $fast_model,
                $subagent_timeout,
                $api_key,
                $self.current_provider_additional_params().cloned(),
            )?))
            .tool(sub_agent);
        // The update tools are only attached when a sender is configured,
        // i.e. some consumer is listening for incremental content updates.
        if let Some(sender) = &$self.content_update_sender {
            use crate::agents::tools::{UpdateCommitTool, UpdatePRTool, UpdateReviewTool};
            Ok(builder
                .tool(DebugTool::new(UpdateCommitTool::new(sender.clone())))
                .tool(DebugTool::new(UpdatePRTool::new(sender.clone())))
                .tool(DebugTool::new(UpdateReviewTool::new(sender.clone())))
                .build())
        } else {
            Ok(builder.build())
        }
    }};
}
// Capability definitions embedded at compile time. Each TOML file is parsed by
// `load_capability_config` for its `task_prompt` and (optional) `output_type`.
const CAPABILITY_COMMIT: &str = include_str!("capabilities/commit.toml");
const CAPABILITY_PR: &str = include_str!("capabilities/pr.toml");
const CAPABILITY_REVIEW: &str = include_str!("capabilities/review.toml");
const CAPABILITY_CHANGELOG: &str = include_str!("capabilities/changelog.toml");
const CAPABILITY_RELEASE_NOTES: &str = include_str!("capabilities/release_notes.toml");
const CAPABILITY_CHAT: &str = include_str!("capabilities/chat.toml");
const CAPABILITY_SEMANTIC_BLAME: &str = include_str!("capabilities/semantic_blame.toml");
/// System preamble used by `build_agent` when no custom preamble was set via
/// `set_preamble`. Describes the available tools and delegation strategy.
const DEFAULT_PREAMBLE: &str = "\
You are Iris, a helpful AI assistant specialized in Git operations and workflows.
You have access to Git tools, code analysis tools, and powerful sub-agent capabilities for handling large analyses.
**File Access Tools:**
- **file_read** - Read file contents directly. Use `start_line` and `num_lines` for large files.
- **project_docs** - Load a compact snapshot of README and agent instructions. Use targeted doc types for full docs when needed.
- **code_search** - Search for patterns across files. Use sparingly; prefer file_read for known files.
**Sub-Agent Tools:**
1. **parallel_analyze** - Run multiple analysis tasks CONCURRENTLY with independent context windows
   - Best for: Large changesets (>500 lines or >20 files), batch commit analysis
   - Each task runs in its own subagent, preventing context overflow
   - Example: parallel_analyze({ \"tasks\": [\"Analyze auth/ changes for security\", \"Review db/ for performance\", \"Check api/ for breaking changes\"] })
2. **analyze_subagent** - Delegate a single focused task to a sub-agent
   - Best for: Deep dive on specific files or focused analysis
**Best Practices:**
- Use git_diff to get changes first - it includes file content
- Use file_read to read files directly instead of multiple code_search calls
- Use project_docs when repository conventions or product framing matter; do not front-load docs if the diff already answers the question
- Use parallel_analyze for large changesets to avoid context overflow";
/// Returns the response-format suffix appended to streaming prompts.
///
/// Chat mode asks for terse plain text (tool output already carries the
/// content); every other capability asks for structured markdown.
fn streaming_response_instructions(capability: &str) -> &'static str {
    match capability {
        "chat" => {
            "After using the available tools, respond in plain text.\n\
            Keep it concise and do not repeat full content that tools already updated."
        }
        _ => {
            "After using the available tools, respond with your analysis in markdown format.\n\
            Keep it clear, well-structured, and informative."
        }
    }
}
use crate::agents::provider::{self, CompletionProfile, DynAgent};
use crate::agents::tools::{GitRepoInfo, ParallelAnalyze, Workspace};
/// Callbacks surfaced to consumers while a streaming execution is in flight.
#[async_trait::async_trait]
pub trait StreamingCallback: Send + Sync {
    /// Invoked for each text chunk, with optional running token metrics.
    async fn on_chunk(
        &self,
        chunk: &str,
        tokens: Option<crate::agents::status::TokenMetrics>,
    ) -> Result<()>;
    /// Invoked once after streaming finishes, with the aggregated response.
    async fn on_complete(
        &self,
        full_response: &str,
        final_tokens: crate::agents::status::TokenMetrics,
    ) -> Result<()>;
    /// Invoked when streaming fails with an error.
    async fn on_error(&self, error: &anyhow::Error) -> Result<()>;
    /// Invoked with human-readable progress/status messages.
    async fn on_status_update(&self, message: &str) -> Result<()>;
}
/// Typed result of a capability run. The variant is chosen from the
/// capability's declared `output_type` (see `load_capability_config` /
/// `text_to_structured_response`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum StructuredResponse {
    CommitMessage(crate::types::GeneratedMessage),
    PullRequest(crate::types::MarkdownPullRequest),
    Changelog(crate::types::MarkdownChangelog),
    ReleaseNotes(crate::types::MarkdownReleaseNotes),
    MarkdownReview(crate::types::MarkdownReview),
    SemanticBlame(String),
    // Fallback for capabilities with no structured output type.
    PlainText(String),
}
impl fmt::Display for StructuredResponse {
    /// Renders each variant through its canonical textual form: commit
    /// messages via the shared formatter, markdown documents via their raw
    /// content, and plain variants verbatim.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::CommitMessage(msg) => write!(f, "{}", crate::types::format_commit_message(msg)),
            Self::PullRequest(pr) => write!(f, "{}", pr.raw_content()),
            Self::Changelog(cl) => write!(f, "{}", cl.raw_content()),
            Self::ReleaseNotes(rn) => write!(f, "{}", rn.raw_content()),
            Self::MarkdownReview(review) => write!(f, "{}", review.format()),
            Self::SemanticBlame(explanation) => f.write_str(explanation),
            Self::PlainText(text) => f.write_str(text),
        }
    }
}
/// Locates the first balanced top-level `{...}` span in `s`.
///
/// Returns byte offsets `(start, end)` where `end` is one past the closing
/// brace, so `&s[start..end]` is the candidate object. Closing braces seen
/// before any opener are ignored; unbalanced input yields `None`.
fn find_balanced_braces(s: &str) -> Option<(usize, usize)> {
    let mut open_at: Option<usize> = None;
    let mut nesting: i32 = 0;
    for (idx, c) in s.char_indices() {
        if c == '{' {
            if nesting == 0 {
                open_at = Some(idx);
            }
            nesting += 1;
        } else if c == '}' && nesting > 0 {
            nesting -= 1;
            if nesting == 0 {
                // `idx + 1` is safe: '}' is one byte, so this lands on a
                // char boundary.
                return open_at.map(|open| (open, idx + 1));
            }
        }
    }
    None
}
/// Pulls a JSON object out of a (possibly chatty) LLM response.
///
/// Tries, in order:
/// 1. the whole trimmed response as pure JSON,
/// 2. the contents of the first ```json fenced block,
/// 3. every balanced `{...}` candidate (sanitized) until one parses,
/// 4. wrapping a raw-markdown response as `{"content": "..."}`.
///
/// # Errors
/// Returns the last candidate's parse error (with a preview), or a generic
/// "no valid JSON" error when nothing matched.
fn extract_json_from_response(response: &str) -> Result<String> {
    use crate::agents::debug;
    debug::debug_section("JSON Extraction");
    let trimmed_response = response.trim();
    // Fast path: the model obeyed instructions and returned raw JSON.
    if trimmed_response.starts_with('{')
        && serde_json::from_str::<serde_json::Value>(trimmed_response).is_ok()
    {
        debug::debug_context_management(
            "Response is pure JSON",
            &format!("{} characters", trimmed_response.len()),
        );
        return Ok(trimmed_response.to_string());
    }
    // Fenced-block path. NOTE(review): this branch returns without validating
    // the JSON — the caller's parse/recovery step handles invalid payloads.
    if let Some(start) = response.find("```json") {
        let content_start = start + "```json".len();
        // Prefer a fence on its own line; fall back to any closing fence, or
        // the rest of the string when the fence is unterminated.
        let json_end = if let Some(end) = response[content_start..].find("\n```") {
            end
        } else {
            response[content_start..]
                .find("```")
                .unwrap_or(response.len() - content_start)
        };
        let json_content = &response[content_start..content_start + json_end];
        let trimmed = json_content.trim().to_string();
        debug::debug_context_management(
            "Found JSON in markdown code block",
            &format!("{} characters", trimmed.len()),
        );
        if let Err(e) = debug::write_debug_artifact("iris_extracted.json", &trimmed) {
            debug::debug_warning(&format!("Failed to write extracted JSON: {}", e));
        }
        debug::debug_json_parse_attempt(&trimmed);
        return Ok(trimmed);
    }
    // Scan path: try each balanced-brace candidate. Advancing `cursor` just
    // past a failed candidate's opening brace skips false positives such as
    // `${{ ... }}` template expressions (see the unit test below).
    let mut last_error: Option<anyhow::Error> = None;
    let mut cursor = 0;
    while cursor < response.len() {
        let Some((rel_start, rel_end)) = find_balanced_braces(&response[cursor..]) else {
            break;
        };
        let start = cursor + rel_start;
        let end = cursor + rel_end;
        let json_content = &response[start..end];
        debug::debug_json_parse_attempt(json_content);
        let sanitized = sanitize_json_response(json_content);
        match serde_json::from_str::<serde_json::Value>(&sanitized) {
            Ok(_) => {
                debug::debug_context_management(
                    "Found valid JSON object",
                    &format!("{} characters", json_content.len()),
                );
                return Ok(sanitized.into_owned());
            }
            Err(e) => {
                debug::debug_json_parse_error(&format!(
                    "Candidate at offset {} is not valid JSON: {}",
                    start, e
                ));
                // Keep a short preview for the eventual error message.
                // NOTE(review): `[..200]` is a byte index and could panic on a
                // multi-byte char boundary — confirm candidate inputs.
                let preview = if json_content.len() > 200 {
                    format!("{}...", &json_content[..200])
                } else {
                    json_content.to_string()
                };
                last_error = Some(anyhow::anyhow!(
                    "Found JSON-like content but it's not valid JSON: {}\nPreview: {}",
                    e,
                    preview
                ));
                cursor = start + 1;
            }
        }
    }
    if let Some(err) = last_error {
        return Err(err);
    }
    // Markdown fallback: wrap a heading-led response as {"content": "..."}.
    let trimmed = response.trim();
    if trimmed.starts_with('#') || trimmed.starts_with("##") {
        debug::debug_context_management(
            "Detected raw markdown response",
            "Wrapping in JSON structure",
        );
        let escaped_content = serde_json::to_string(trimmed)?;
        let wrapped = format!(r#"{{"content": {}}}"#, escaped_content);
        debug::debug_json_parse_attempt(&wrapped);
        return Ok(wrapped);
    }
    debug::debug_json_parse_error("No valid JSON found in response");
    Err(anyhow::anyhow!("No valid JSON found in response"))
}
/// Escapes raw control characters that appear inside JSON string literals
/// (LLMs frequently emit literal newlines/tabs there), leaving everything
/// outside strings untouched.
///
/// Runs a detection pass first so clean input is returned as
/// `Cow::Borrowed` without allocating; only dirty input is rebuilt.
fn sanitize_json_response(raw: &str) -> Cow<'_, str> {
    // Pass 1: is there any unescaped control character inside a string?
    let mut inside = false;
    let mut after_backslash = false;
    let mut dirty = false;
    for c in raw.chars() {
        if !inside {
            inside = c == '"';
            continue;
        }
        if after_backslash {
            // The character right after a backslash is part of an escape
            // sequence; skip it without inspection (mirrors the rebuild pass).
            after_backslash = false;
        } else if c == '\\' {
            after_backslash = true;
        } else if c == '"' {
            inside = false;
        } else if c.is_control() {
            dirty = true;
            break;
        }
    }
    if !dirty {
        return Cow::Borrowed(raw);
    }
    // Pass 2: rebuild, escaping control characters found inside strings.
    let mut out = String::with_capacity(raw.len());
    inside = false;
    after_backslash = false;
    for c in raw.chars() {
        if !inside {
            out.push(c);
            if c == '"' {
                inside = true;
                after_backslash = false;
            }
            continue;
        }
        if after_backslash {
            out.push(c);
            after_backslash = false;
            continue;
        }
        match c {
            '\\' => {
                out.push('\\');
                after_backslash = true;
            }
            '"' => {
                out.push('"');
                inside = false;
            }
            '\n' => out.push_str("\\n"),
            '\r' => out.push_str("\\r"),
            '\t' => out.push_str("\\t"),
            ctrl if ctrl.is_control() => {
                // Any other control character gets a \uXXXX escape.
                use std::fmt::Write as _;
                let _ = write!(&mut out, "\\u{:04X}", u32::from(ctrl));
            }
            other => out.push(other),
        }
    }
    Cow::Owned(out)
}
/// Parses `json_str` into `T` through the output validator, which may apply
/// structural recovery before deserializing.
///
/// # Errors
/// Returns an error when validation fails outright, or when recovery still
/// yields no usable value.
fn parse_with_recovery<T>(json_str: &str) -> Result<T>
where
    T: JsonSchema + DeserializeOwned,
{
    use crate::agents::debug as agent_debug;
    use crate::agents::output_validator::validate_and_parse;
    let validation_result = validate_and_parse::<T>(json_str)?;
    // Surface every recovery warning so silent fixes stay visible in logs.
    if validation_result.recovered {
        agent_debug::debug_context_management(
            "JSON recovery applied",
            &format!("{} issues fixed", validation_result.warnings.len()),
        );
        for warning in &validation_result.warnings {
            agent_debug::debug_warning(warning);
        }
    }
    validation_result
        .value
        .ok_or_else(|| anyhow::anyhow!("Failed to parse JSON even after recovery"))
}
/// Orchestrates an LLM provider, capability prompts, and tool wiring for Iris.
/// Construct with [`IrisAgent::new`] (or [`IrisAgentBuilder`]) and configure
/// via the setters before calling `execute_task`/`execute_task_streaming`.
pub struct IrisAgent {
    /// Provider name, e.g. "openai", "anthropic", "google"/"gemini".
    provider: String,
    /// Primary model used by the main agent.
    model: String,
    /// Optional cheaper model for sub-agents; falls back to `model`.
    fast_model: Option<String>,
    /// Capability most recently set/executed (e.g. "commit", "pr").
    current_capability: Option<String>,
    /// Free-form provider settings exposed via `provider_config()`.
    provider_config: HashMap<String, String>,
    /// Custom system preamble; `DEFAULT_PREAMBLE` is used when `None`.
    preamble: Option<String>,
    /// Application config (presets, gitmoji, timeouts); optional.
    config: Option<crate::config::Config>,
    /// When set, update tools are attached so content can stream out.
    content_update_sender: Option<crate::agents::tools::ContentUpdateSender>,
    /// Workspace tool instance shared with built agents.
    workspace: Workspace,
}
impl IrisAgent {
/// Creates an agent for `provider`/`model` with all optional settings unset.
///
/// Optional knobs (config, preamble, fast model, content-update sender) are
/// supplied afterwards through the corresponding setters.
///
/// # Errors
/// Currently infallible; the `Result` is kept for interface stability.
pub fn new(provider: &str, model: &str) -> Result<Self> {
    let agent = Self {
        provider: provider.to_owned(),
        model: model.to_owned(),
        fast_model: None,
        current_capability: None,
        provider_config: HashMap::new(),
        preamble: None,
        config: None,
        content_update_sender: None,
        workspace: Workspace::new(),
    };
    Ok(agent)
}
/// Registers the channel used by the update tools to push incremental content.
pub fn set_content_update_sender(&mut self, sender: crate::agents::tools::ContentUpdateSender) {
    self.content_update_sender = Some(sender);
}
/// Model used for sub-agents; falls back to the primary model when unset.
fn effective_fast_model(&self) -> &str {
    self.fast_model.as_deref().unwrap_or(&self.model)
}
/// Looks up the API key for the current provider from config, if configured.
fn get_api_key(&self) -> Option<&str> {
    provider::current_provider_config(self.config.as_ref(), &self.provider)
        .and_then(crate::providers::ProviderConfig::api_key_if_set)
}
/// Resolves the provider name into a typed `Provider` value.
fn current_provider(&self) -> Result<crate::providers::Provider> {
    provider::provider_from_name(&self.provider)
}
/// Extra provider parameters from config for the current provider, if any.
fn current_provider_additional_params(&self) -> Option<&HashMap<String, String>> {
    provider::current_provider_config(self.config.as_ref(), &self.provider)
        .map(|provider_config| &provider_config.additional_params)
}
/// Constructs the non-streaming agent for the configured provider.
///
/// Wires a fast-model sub-agent (exposed as the `analyze_subagent` tool),
/// the core Git/file tools, `parallel_analyze`, and — when a content-update
/// sender is registered — the incremental update tools. Returns a `DynAgent`
/// variant matching the provider.
///
/// # Errors
/// Fails when the provider is unsupported or any builder step fails.
fn build_agent(&self) -> Result<DynAgent> {
    use crate::agents::debug_tool::DebugTool;
    let preamble = self.preamble.as_deref().unwrap_or(DEFAULT_PREAMBLE);
    let fast_model = self.effective_fast_model();
    let api_key = self.get_api_key();
    // 120 seconds is the sub-agent timeout when no config is present.
    let subagent_timeout = self
        .config
        .as_ref()
        .map_or(120, |c| c.subagent_timeout_secs);
    // Local macros because each provider's AgentBuilder is a distinct type,
    // so this wiring cannot be a generic function over the builder.
    macro_rules! build_subagent {
        ($builder:expr) => {{
            let builder = $builder
                .name("analyze_subagent")
                .description("Delegate focused analysis tasks to a sub-agent with its own context window. Use for analyzing specific files, commits, or code sections independently. The sub-agent has access to Git tools (diff, log, status) and file analysis tools.")
                .preamble("You are a specialized analysis sub-agent for Iris. Your job is to complete focused analysis tasks and return concise, actionable summaries.
Guidelines:
- Use the available tools to gather information
- Focus only on what's asked - don't expand scope
- Return a clear, structured summary of findings
- Highlight important issues, patterns, or insights
- Keep your response focused and concise")
            ;
            let builder = self.apply_completion_params(
                builder,
                fast_model,
                4096, // smaller output budget than the main agent
                CompletionProfile::Subagent,
            )?;
            crate::attach_core_tools!(builder).build()
        }};
    }
    macro_rules! attach_main_tools {
        ($builder:expr) => {{
            crate::attach_core_tools!($builder)
                .tool(DebugTool::new(GitRepoInfo))
                .tool(DebugTool::new(self.workspace.clone()))
                .tool(DebugTool::new(ParallelAnalyze::with_timeout(
                    &self.provider,
                    fast_model,
                    subagent_timeout,
                    api_key,
                    self.current_provider_additional_params().cloned(),
                )?))
        }};
    }
    // Update tools are only attached when a consumer is listening.
    macro_rules! maybe_attach_update_tools {
        ($builder:expr) => {{
            if let Some(sender) = &self.content_update_sender {
                use crate::agents::tools::{UpdateCommitTool, UpdatePRTool, UpdateReviewTool};
                $builder
                    .tool(DebugTool::new(UpdateCommitTool::new(sender.clone())))
                    .tool(DebugTool::new(UpdatePRTool::new(sender.clone())))
                    .tool(DebugTool::new(UpdateReviewTool::new(sender.clone())))
                    .build()
            } else {
                $builder.build()
            }
        }};
    }
    // Same assembly sequence per provider; only the builder function and the
    // resulting DynAgent variant differ.
    match self.provider.as_str() {
        "openai" => {
            let sub_agent = build_subagent!(provider::openai_builder(fast_model, api_key)?);
            let builder = provider::openai_builder(&self.model, api_key)?.preamble(preamble);
            let builder = self.apply_completion_params(
                builder,
                &self.model,
                16384,
                CompletionProfile::MainAgent,
            )?;
            let builder = attach_main_tools!(builder).tool(sub_agent);
            let agent = maybe_attach_update_tools!(builder);
            Ok(DynAgent::OpenAI(agent))
        }
        "anthropic" => {
            let sub_agent = build_subagent!(provider::anthropic_builder(fast_model, api_key)?);
            let builder = provider::anthropic_builder(&self.model, api_key)?.preamble(preamble);
            let builder = self.apply_completion_params(
                builder,
                &self.model,
                16384,
                CompletionProfile::MainAgent,
            )?;
            let builder = attach_main_tools!(builder).tool(sub_agent);
            let agent = maybe_attach_update_tools!(builder);
            Ok(DynAgent::Anthropic(agent))
        }
        "google" | "gemini" => {
            let sub_agent = build_subagent!(provider::gemini_builder(fast_model, api_key)?);
            let builder = provider::gemini_builder(&self.model, api_key)?.preamble(preamble);
            let builder = self.apply_completion_params(
                builder,
                &self.model,
                16384,
                CompletionProfile::MainAgent,
            )?;
            let builder = attach_main_tools!(builder).tool(sub_agent);
            let agent = maybe_attach_update_tools!(builder);
            Ok(DynAgent::Gemini(agent))
        }
        _ => Err(anyhow::anyhow!("Unsupported provider: {}", self.provider)),
    }
}
/// Applies provider-specific completion parameters (token limit, profile,
/// extra config params) to `builder`.
///
/// # Errors
/// Fails when the configured provider name cannot be resolved.
fn apply_completion_params<M>(
    &self,
    builder: AgentBuilder<M>,
    model: &str,
    max_tokens: u64,
    profile: CompletionProfile,
) -> Result<AgentBuilder<M>>
where
    M: CompletionModel,
{
    let provider = self.current_provider()?;
    Ok(provider::apply_completion_params(
        builder,
        provider,
        model,
        max_tokens,
        self.current_provider_additional_params(),
        profile,
    ))
}
/// Runs the agent and parses its reply into the structured type `T`.
///
/// Builds the agent, appends a strict JSON-only instruction block carrying
/// `T`'s JSON schema, prompts with up to 50 tool turns, then extracts,
/// sanitizes, and parses the JSON response (with recovery).
///
/// # Errors
/// Fails when agent construction, prompting, JSON extraction, or parsing
/// (even after recovery) fails.
async fn execute_with_agent<T>(&self, system_prompt: &str, user_prompt: &str) -> Result<T>
where
    T: JsonSchema + for<'a> serde::Deserialize<'a> + serde::Serialize + Send + Sync + 'static,
{
    use crate::agents::debug;
    use crate::agents::status::IrisPhase;
    use crate::messages::get_capability_message;
    use schemars::schema_for;
    // Default to "commit" for status messages when no capability is set.
    let capability = self.current_capability().unwrap_or("commit");
    debug::debug_phase_change(&format!("AGENT EXECUTION: {}", std::any::type_name::<T>()));
    let msg = get_capability_message(capability);
    crate::iris_status_dynamic!(IrisPhase::Planning, msg.text, 2, 4);
    let agent = self.build_agent()?;
    debug::debug_context_management(
        "Agent built with tools",
        &format!(
            "Provider: {}, Model: {} (fast: {})",
            self.provider,
            self.model,
            self.effective_fast_model()
        ),
    );
    // Embed T's schema in the prompt so the model knows the exact shape.
    let schema = schema_for!(T);
    let schema_json = serde_json::to_string_pretty(&schema)?;
    debug::debug_context_management(
        "JSON schema created",
        &format!("Type: {}", std::any::type_name::<T>()),
    );
    let full_prompt = format!(
        "{system_prompt}\n\n{user_prompt}\n\n\
        === CRITICAL: RESPONSE FORMAT ===\n\
        After using the available tools to gather necessary information, you MUST respond with ONLY a valid JSON object.\n\n\
        REQUIRED JSON SCHEMA:\n\
        {schema_json}\n\n\
        CRITICAL INSTRUCTIONS:\n\
        - Return ONLY the raw JSON object - nothing else\n\
        - NO explanations before the JSON\n\
        - NO explanations after the JSON\n\
        - NO markdown code blocks (just raw JSON)\n\
        - NO preamble text like 'Here is the JSON:' or 'Let me generate:'\n\
        - Start your response with {{ and end with }}\n\
        - The JSON must be complete and valid\n\n\
        Your entire response should be ONLY the JSON object."
    );
    debug::debug_llm_request(&full_prompt, Some(16384));
    let gen_msg = get_capability_message(capability);
    crate::iris_status_dynamic!(IrisPhase::Generation, gen_msg.text, 3, 4);
    let timer = debug::DebugTimer::start("Agent prompt execution");
    debug::debug_context_management(
        "LLM request",
        "Sending prompt to agent with multi_turn(50)",
    );
    // Up to 50 tool-use turns before the model must produce its final answer.
    let prompt_response: PromptResponse = agent.prompt_extended(&full_prompt, 50).await?;
    timer.finish();
    let usage = &prompt_response.usage;
    debug::debug_context_management(
        "Token usage",
        &format!(
            "input: {} | output: {} | total: {}",
            usage.input_tokens, usage.output_tokens, usage.total_tokens
        ),
    );
    let response = &prompt_response.output;
    #[allow(clippy::cast_possible_truncation, clippy::as_conversions)]
    let total_tokens_usize = usage.total_tokens as usize;
    debug::debug_llm_response(
        response,
        std::time::Duration::from_secs(0),
        Some(total_tokens_usize),
    );
    crate::iris_status_dynamic!(
        IrisPhase::Synthesis,
        "✨ Iris is synthesizing results...",
        4,
        4
    );
    // Extract, sanitize (idempotent if extraction already sanitized), and
    // parse with recovery into T.
    let json_str = extract_json_from_response(response)?;
    let sanitized_json = sanitize_json_response(&json_str);
    let sanitized_ref = sanitized_json.as_ref();
    if matches!(sanitized_json, Cow::Borrowed(_)) {
        debug::debug_json_parse_attempt(sanitized_ref);
    } else {
        debug::debug_context_management(
            "Sanitized JSON response",
            &format!("{} → {} characters", json_str.len(), sanitized_ref.len()),
        );
        debug::debug_json_parse_attempt(sanitized_ref);
    }
    let result: T = parse_with_recovery(sanitized_ref)?;
    debug::debug_json_parse_success(std::any::type_name::<T>());
    crate::iris_status_completed!();
    Ok(result)
}
/// Appends style/preset/emoji instructions to `system_prompt` based on the
/// configured preset and the capability being run. No-op without a config.
fn inject_style_instructions(&self, system_prompt: &mut String, capability: &str) {
    let Some(config) = &self.config else {
        return;
    };
    let preset_name = config.get_effective_preset_name();
    let is_conventional = preset_name == "conventional";
    let is_default_mode = preset_name == "default" || preset_name.is_empty();
    // Commit-style detection only applies to the default preset when no
    // explicit gitmoji override was given.
    let use_style_detection =
        capability == "commit" && is_default_mode && config.gitmoji_override.is_none();
    let commit_emoji = config.use_gitmoji && !is_conventional && !use_style_detection;
    let output_emoji = config.gitmoji_override.unwrap_or(config.use_gitmoji);
    // Named presets (anything besides default/empty) contribute their own
    // style instructions verbatim.
    if !is_default_mode && !preset_name.is_empty() {
        let library = crate::instruction_presets::get_instruction_preset_library();
        match library.get_preset(preset_name) {
            Some(preset) => {
                tracing::info!("📋 Injecting '{}' preset style instructions", preset_name);
                system_prompt.push_str("\n\n=== STYLE INSTRUCTIONS ===\n");
                system_prompt.push_str(&preset.instructions);
                system_prompt.push('\n');
            }
            None => tracing::warn!("⚠️ Preset '{}' not found in library", preset_name),
        }
    }
    match capability {
        "commit" if commit_emoji => {
            system_prompt.push_str(concat!(
                "\n\n=== GITMOJI INSTRUCTIONS ===\n",
                "Set the 'emoji' field to a single relevant gitmoji. ",
                "DO NOT include the emoji in the 'message' or 'title' text - only set the 'emoji' field. ",
                "Choose the closest match from this compact guide:\n\n",
            ));
            system_prompt.push_str(&crate::gitmoji::get_gitmoji_prompt_guide());
            system_prompt.push_str("\n\nThe emoji should match the primary type of change.");
        }
        "commit" if is_conventional => {
            system_prompt.push_str(concat!(
                "\n\n=== CONVENTIONAL COMMITS FORMAT ===\n",
                "IMPORTANT: This uses Conventional Commits format. ",
                "DO NOT include any emojis in the commit message or PR title. ",
                "The 'emoji' field should be null.",
            ));
        }
        "pr" | "review" => {
            if output_emoji {
                Self::inject_pr_review_emoji_styling(system_prompt);
            } else {
                Self::inject_no_emoji_styling(system_prompt);
            }
        }
        "release_notes" => {
            if output_emoji {
                Self::inject_release_notes_emoji_styling(system_prompt);
            } else {
                Self::inject_no_emoji_styling(system_prompt);
            }
        }
        "changelog" => {
            if output_emoji {
                Self::inject_changelog_emoji_styling(system_prompt);
            } else {
                Self::inject_no_emoji_styling(system_prompt);
            }
        }
        _ => {}
    }
}
/// Appends PR/review emoji-usage rules plus the compact gitmoji guide.
fn inject_pr_review_emoji_styling(prompt: &mut String) {
    // The static template is appended in one piece for readability.
    prompt.push_str(concat!(
        "\n\n=== EMOJI STYLING ===\n",
        "Use emojis to make the output visually scannable and engaging:\n",
        "- H1 title: ONE gitmoji at the start (✨, 🐛, ♻️, etc.)\n",
        "- Section headers: Add relevant emojis (🎯 What's New, ⚙️ How It Works, 📋 Commits, ⚠️ Breaking Changes)\n",
        "- Commit list entries: Include gitmoji where appropriate\n",
        "- Body text: Keep clean - no scattered emojis within prose\n\n",
    ));
    prompt.push_str(&crate::gitmoji::get_gitmoji_prompt_guide());
}
/// Appends release-notes emoji-usage rules plus the compact gitmoji guide.
fn inject_release_notes_emoji_styling(prompt: &mut String) {
    prompt.push_str(concat!(
        "\n\n=== EMOJI STYLING ===\n",
        "Use at most one emoji per highlight/section title. No emojis in bullet descriptions, upgrade notes, or metrics. ",
        "Pick from the approved gitmoji list (e.g. 🌟 Highlights, 🤖 Agents, 🔧 Tooling, 🐛 Fixes, ⚡ Performance). ",
        "Never sprinkle emojis within sentences or JSON keys.\n\n",
    ));
    prompt.push_str(&crate::gitmoji::get_gitmoji_prompt_guide());
}
/// Appends changelog emoji-usage rules plus the compact gitmoji guide.
fn inject_changelog_emoji_styling(prompt: &mut String) {
    prompt.push_str(concat!(
        "\n\n=== EMOJI STYLING ===\n",
        "Section keys must remain plain text (Added/Changed/Deprecated/Removed/Fixed/Security). ",
        "You may include one emoji within a change description to reinforce meaning. ",
        "Never add emojis to JSON keys, section names, metrics, or upgrade notes.\n\n",
    ));
    prompt.push_str(&crate::gitmoji::get_gitmoji_prompt_guide());
}
/// Appends an explicit no-emoji directive (used when gitmoji is disabled).
fn inject_no_emoji_styling(prompt: &mut String) {
    prompt.push_str(concat!(
        "\n\n=== NO EMOJI STYLING ===\n",
        "DO NOT include any emojis anywhere in the output. Keep all content plain text.",
    ));
}
/// Runs `capability` against `user_prompt` and returns a typed response.
///
/// Loads the capability's prompt and `output_type`, injects style
/// instructions, then dispatches: structured output types go through the
/// schema-enforced `execute_with_agent`, while "SemanticBlame" and unknown
/// types use a plain multi-turn prompt.
///
/// # Errors
/// Fails when capability loading, agent construction, prompting, or parsing
/// fails.
pub async fn execute_task(
    &mut self,
    capability: &str,
    user_prompt: &str,
) -> Result<StructuredResponse> {
    use crate::agents::status::IrisPhase;
    use crate::messages::get_capability_message;
    let waiting_msg = get_capability_message(capability);
    crate::iris_status_dynamic!(IrisPhase::Initializing, waiting_msg.text, 1, 4);
    let (mut system_prompt, output_type) = self.load_capability_config(capability)?;
    self.inject_style_instructions(&mut system_prompt, capability);
    // Remember the capability so execute_with_agent can pick status messages.
    self.current_capability = Some(capability.to_string());
    crate::iris_status_dynamic!(
        IrisPhase::Analysis,
        "🔍 Iris is analyzing your changes...",
        2,
        4
    );
    // Dispatch on the capability's declared output type.
    match output_type.as_str() {
        "GeneratedMessage" => {
            let response = self
                .execute_with_agent::<crate::types::GeneratedMessage>(
                    &system_prompt,
                    user_prompt,
                )
                .await?;
            Ok(StructuredResponse::CommitMessage(response))
        }
        "MarkdownPullRequest" => {
            let response = self
                .execute_with_agent::<crate::types::MarkdownPullRequest>(
                    &system_prompt,
                    user_prompt,
                )
                .await?;
            Ok(StructuredResponse::PullRequest(response))
        }
        "MarkdownChangelog" => {
            let response = self
                .execute_with_agent::<crate::types::MarkdownChangelog>(
                    &system_prompt,
                    user_prompt,
                )
                .await?;
            Ok(StructuredResponse::Changelog(response))
        }
        "MarkdownReleaseNotes" => {
            let response = self
                .execute_with_agent::<crate::types::MarkdownReleaseNotes>(
                    &system_prompt,
                    user_prompt,
                )
                .await?;
            Ok(StructuredResponse::ReleaseNotes(response))
        }
        "MarkdownReview" => {
            let response = self
                .execute_with_agent::<crate::types::MarkdownReview>(&system_prompt, user_prompt)
                .await?;
            Ok(StructuredResponse::MarkdownReview(response))
        }
        "SemanticBlame" => {
            // Free-form text output: no JSON schema, fewer turns (10).
            let agent = self.build_agent()?;
            let full_prompt = format!("{system_prompt}\n\n{user_prompt}");
            let response = agent.prompt_multi_turn(&full_prompt, 10).await?;
            Ok(StructuredResponse::SemanticBlame(response))
        }
        _ => {
            // Unknown output types fall back to plain text with 50 turns.
            let agent = self.build_agent()?;
            let full_prompt = format!("{system_prompt}\n\n{user_prompt}");
            let response = agent.prompt_multi_turn(&full_prompt, 50).await?;
            Ok(StructuredResponse::PlainText(response))
        }
    }
}
/// Streaming variant of [`Self::execute_task`].
///
/// Streams text chunks to `on_chunk(chunk, aggregated_so_far)` as they
/// arrive, surfaces tool calls as status updates, then converts the
/// aggregated text into a `StructuredResponse` by `output_type` (no JSON
/// schema enforcement in streaming mode).
///
/// # Errors
/// Fails when capability loading, agent construction, or streaming fails, or
/// when the provider is unsupported.
pub async fn execute_task_streaming<F>(
    &mut self,
    capability: &str,
    user_prompt: &str,
    mut on_chunk: F,
) -> Result<StructuredResponse>
where
    F: FnMut(&str, &str) + Send,
{
    use crate::agents::status::IrisPhase;
    use crate::messages::get_capability_message;
    use futures::StreamExt;
    use rig::agent::MultiTurnStreamItem;
    use rig::streaming::{StreamedAssistantContent, StreamingPrompt};
    let waiting_msg = get_capability_message(capability);
    crate::iris_status_dynamic!(IrisPhase::Initializing, waiting_msg.text, 1, 4);
    let (mut system_prompt, output_type) = self.load_capability_config(capability)?;
    self.inject_style_instructions(&mut system_prompt, capability);
    self.current_capability = Some(capability.to_string());
    crate::iris_status_dynamic!(
        IrisPhase::Analysis,
        "🔍 Iris is analyzing your changes...",
        2,
        4
    );
    // Streaming prompts append a response-format suffix instead of a schema.
    let full_prompt = format!(
        "{}\n\n{}\n\n{}",
        system_prompt,
        user_prompt,
        streaming_response_instructions(capability)
    );
    let gen_msg = get_capability_message(capability);
    crate::iris_status_dynamic!(IrisPhase::Generation, gen_msg.text, 3, 4);
    // Macro because each provider's stream is a distinct concrete type.
    macro_rules! consume_stream {
        ($stream:expr) => {{
            let mut aggregated_text = String::new();
            let mut stream = $stream;
            while let Some(item) = stream.next().await {
                match item {
                    // Text chunk: aggregate and forward to the callback.
                    Ok(MultiTurnStreamItem::StreamAssistantItem(
                        StreamedAssistantContent::Text(text),
                    )) => {
                        aggregated_text.push_str(&text.text);
                        on_chunk(&text.text, &aggregated_text);
                    }
                    // Tool call: surface as a status update, no text emitted.
                    Ok(MultiTurnStreamItem::StreamAssistantItem(
                        StreamedAssistantContent::ToolCall { tool_call, .. },
                    )) => {
                        let tool_name = &tool_call.function.name;
                        let reason = format!("Calling {}", tool_name);
                        crate::iris_status_dynamic!(
                            IrisPhase::ToolExecution {
                                tool_name: tool_name.clone(),
                                reason: reason.clone()
                            },
                            format!("🔧 {}", reason),
                            3,
                            4
                        );
                    }
                    Ok(MultiTurnStreamItem::FinalResponse(_)) => break,
                    Err(e) => return Err(anyhow::anyhow!("Streaming error: {}", e)),
                    // Other stream items (e.g. reasoning) are ignored.
                    _ => {}
                }
            }
            aggregated_text
        }};
    }
    let aggregated_text = match self.provider.as_str() {
        "openai" => {
            let agent = self.build_openai_agent_for_streaming(&full_prompt)?;
            let stream = agent.stream_prompt(&full_prompt).multi_turn(50).await;
            consume_stream!(stream)
        }
        "anthropic" => {
            let agent = self.build_anthropic_agent_for_streaming(&full_prompt)?;
            let stream = agent.stream_prompt(&full_prompt).multi_turn(50).await;
            consume_stream!(stream)
        }
        "google" | "gemini" => {
            let agent = self.build_gemini_agent_for_streaming(&full_prompt)?;
            let stream = agent.stream_prompt(&full_prompt).multi_turn(50).await;
            consume_stream!(stream)
        }
        _ => return Err(anyhow::anyhow!("Unsupported provider: {}", self.provider)),
    };
    crate::iris_status_dynamic!(
        IrisPhase::Synthesis,
        "✨ Iris is synthesizing results...",
        4,
        4
    );
    let response = Self::text_to_structured_response(&output_type, aggregated_text);
    crate::iris_status_completed!();
    Ok(response)
}
/// Wraps aggregated streaming text in the `StructuredResponse` variant that
/// matches `output_type`. Note: "GeneratedMessage" is not handled here, so
/// commit output in streaming mode falls through to `PlainText`.
fn text_to_structured_response(output_type: &str, text: String) -> StructuredResponse {
    match output_type {
        "MarkdownReview" => {
            StructuredResponse::MarkdownReview(crate::types::MarkdownReview { content: text })
        }
        "MarkdownPullRequest" => {
            StructuredResponse::PullRequest(crate::types::MarkdownPullRequest { content: text })
        }
        "MarkdownChangelog" => {
            StructuredResponse::Changelog(crate::types::MarkdownChangelog { content: text })
        }
        "MarkdownReleaseNotes" => {
            StructuredResponse::ReleaseNotes(crate::types::MarkdownReleaseNotes {
                content: text,
            })
        }
        "SemanticBlame" => StructuredResponse::SemanticBlame(text),
        _ => StructuredResponse::PlainText(text),
    }
}
/// Shared setup for the streaming builders: fast model, API key, and the
/// sub-agent timeout (120 seconds when no config is set).
fn streaming_agent_config(&self) -> (&str, Option<&str>, u64) {
    let fast_model = self.effective_fast_model();
    let api_key = self.get_api_key();
    let subagent_timeout = self
        .config
        .as_ref()
        .map_or(120, |c| c.subagent_timeout_secs);
    (fast_model, api_key, subagent_timeout)
}
/// Builds the OpenAI streaming agent. `_prompt` is currently unused; kept so
/// all provider builders share the same signature shape.
fn build_openai_agent_for_streaming(
    &self,
    _prompt: &str,
) -> Result<rig::agent::Agent<provider::OpenAIModel>> {
    let (fast_model, api_key, subagent_timeout) = self.streaming_agent_config();
    build_streaming_agent!(
        self,
        provider::openai_builder,
        fast_model,
        api_key,
        subagent_timeout
    )
}
/// Builds the Anthropic streaming agent (see `build_streaming_agent!`).
fn build_anthropic_agent_for_streaming(
    &self,
    _prompt: &str,
) -> Result<rig::agent::Agent<provider::AnthropicModel>> {
    let (fast_model, api_key, subagent_timeout) = self.streaming_agent_config();
    build_streaming_agent!(
        self,
        provider::anthropic_builder,
        fast_model,
        api_key,
        subagent_timeout
    )
}
/// Builds the Gemini streaming agent (see `build_streaming_agent!`).
fn build_gemini_agent_for_streaming(
    &self,
    _prompt: &str,
) -> Result<rig::agent::Agent<provider::GeminiModel>> {
    let (fast_model, api_key, subagent_timeout) = self.streaming_agent_config();
    build_streaming_agent!(
        self,
        provider::gemini_builder,
        fast_model,
        api_key,
        subagent_timeout
    )
}
/// Resolves `capability` to its `(task_prompt, output_type)` pair from the
/// embedded TOML definitions; unknown capabilities get a generic plain-text
/// prompt.
///
/// # Errors
/// Fails when the embedded TOML cannot be parsed or lacks `task_prompt`.
fn load_capability_config(&self, capability: &str) -> Result<(String, String)> {
    // `self` is currently unused; keep the receiver for API stability.
    let _ = self;
    let embedded = match capability {
        "commit" => Some(CAPABILITY_COMMIT),
        "pr" => Some(CAPABILITY_PR),
        "review" => Some(CAPABILITY_REVIEW),
        "changelog" => Some(CAPABILITY_CHANGELOG),
        "release_notes" => Some(CAPABILITY_RELEASE_NOTES),
        "chat" => Some(CAPABILITY_CHAT),
        "semantic_blame" => Some(CAPABILITY_SEMANTIC_BLAME),
        _ => None,
    };
    let Some(content) = embedded else {
        // Unknown capability: fall back to a generic plain-text task.
        return Ok((
            format!(
                "You are helping with a {capability} task. Use the available Git tools to assist the user."
            ),
            "PlainText".to_string(),
        ));
    };
    let parsed: toml::Value = toml::from_str(content)?;
    let task_prompt = parsed
        .get("task_prompt")
        .and_then(toml::Value::as_str)
        .ok_or_else(|| anyhow::anyhow!("No task_prompt found in capability file"))?;
    // `output_type` is optional; absent means plain text.
    let output_type = parsed
        .get("output_type")
        .and_then(toml::Value::as_str)
        .unwrap_or("PlainText")
        .to_string();
    Ok((task_prompt.to_string(), output_type))
}
/// Capability most recently set or executed, if any.
#[must_use]
pub fn current_capability(&self) -> Option<&str> {
    self.current_capability.as_deref()
}
/// Sends a free-form message to a freshly built agent and returns its reply.
///
/// # Errors
/// Fails when agent construction or prompting fails.
pub async fn chat(&self, message: &str) -> Result<String> {
    let agent = self.build_agent()?;
    let response = agent.prompt(message).await?;
    Ok(response)
}
/// Sets the current capability without executing anything.
pub fn set_capability(&mut self, capability: &str) {
    self.current_capability = Some(capability.to_string());
}
/// Free-form provider settings stored on this agent.
#[must_use]
pub fn provider_config(&self) -> &HashMap<String, String> {
    &self.provider_config
}
/// Replaces the free-form provider settings.
pub fn set_provider_config(&mut self, config: HashMap<String, String>) {
    self.provider_config = config;
}
/// Overrides the default system preamble.
pub fn set_preamble(&mut self, preamble: String) {
    self.preamble = Some(preamble);
}
/// Attaches the application config (presets, gitmoji, timeouts, API keys).
pub fn set_config(&mut self, config: crate::config::Config) {
    self.config = Some(config);
}
/// Sets a cheaper model to use for sub-agents.
pub fn set_fast_model(&mut self, fast_model: String) {
    self.fast_model = Some(fast_model);
}
}
/// Fluent builder for [`IrisAgent`] covering provider, model, and preamble.
pub struct IrisAgentBuilder {
    /// Provider name; defaults to "openai".
    provider: String,
    /// Model identifier; see `IrisAgentBuilder::new` for the default.
    model: String,
    /// Optional custom system preamble.
    preamble: Option<String>,
}
impl IrisAgentBuilder {
    /// Creates a builder with the default provider ("openai") and model.
    #[must_use]
    pub fn new() -> Self {
        Self {
            provider: "openai".to_string(),
            model: "gpt-5.4".to_string(),
            preamble: None,
        }
    }
    /// Overrides the provider name (e.g. "openai", "anthropic", "gemini").
    pub fn with_provider(mut self, provider: impl Into<String>) -> Self {
        self.provider = provider.into();
        self
    }
    /// Overrides the model identifier.
    pub fn with_model(mut self, model: impl Into<String>) -> Self {
        self.model = model.into();
        self
    }
    /// Sets a custom system preamble.
    pub fn with_preamble(mut self, preamble: impl Into<String>) -> Self {
        self.preamble = Some(preamble.into());
        self
    }
    /// Consumes the builder and constructs the configured [`IrisAgent`].
    ///
    /// # Errors
    /// Propagates any error from [`IrisAgent::new`].
    pub fn build(self) -> Result<IrisAgent> {
        let mut agent = IrisAgent::new(&self.provider, &self.model)?;
        if let Some(preamble) = self.preamble {
            agent.set_preamble(preamble);
        }
        Ok(agent)
    }
}
impl Default for IrisAgentBuilder {
    /// Equivalent to [`IrisAgentBuilder::new`].
    fn default() -> Self {
        Self::new()
    }
}
// Unit tests for the pure helpers in this module (JSON extraction,
// sanitization, brace scanning, and prompt-styling text).
#[cfg(test)]
mod tests {
    use super::{
        IrisAgent, extract_json_from_response, find_balanced_braces, sanitize_json_response,
        streaming_response_instructions,
    };
    use serde_json::Value;
    use std::borrow::Cow;
    #[test]
    fn sanitize_json_response_is_noop_for_valid_payloads() {
        let raw = r#"{"title":"Test","description":"All good"}"#;
        let sanitized = sanitize_json_response(raw);
        // Clean input must be returned borrowed (no allocation).
        assert!(matches!(sanitized, Cow::Borrowed(_)));
        serde_json::from_str::<Value>(sanitized.as_ref()).expect("valid JSON");
    }
    #[test]
    fn sanitize_json_response_escapes_literal_newlines() {
        // The literal below intentionally contains a raw newline in-string.
        let raw = "{\"description\": \"Line1
Line2\"}";
        let sanitized = sanitize_json_response(raw);
        assert_eq!(sanitized.as_ref(), "{\"description\": \"Line1\\nLine2\"}");
        serde_json::from_str::<Value>(sanitized.as_ref()).expect("json sanitized");
    }
    #[test]
    fn chat_streaming_instructions_avoid_markdown_suffix() {
        let instructions = streaming_response_instructions("chat");
        assert!(instructions.contains("plain text"));
        assert!(instructions.contains("do not repeat full content"));
        assert!(!instructions.contains("markdown format"));
    }
    #[test]
    fn structured_streaming_instructions_still_use_markdown_suffix() {
        let instructions = streaming_response_instructions("review");
        assert!(instructions.contains("markdown format"));
        assert!(instructions.contains("well-structured"));
    }
    #[test]
    fn find_balanced_braces_returns_first_balanced_pair() {
        let (start, end) = find_balanced_braces("prefix {\"a\":1} suffix").expect("balanced pair");
        assert_eq!(&"prefix {\"a\":1} suffix"[start..end], "{\"a\":1}");
    }
    #[test]
    fn find_balanced_braces_returns_none_for_unbalanced() {
        assert_eq!(find_balanced_braces("no braces here"), None);
        assert_eq!(find_balanced_braces("{ unclosed"), None);
    }
    #[test]
    fn extract_json_skips_github_actions_expression_false_positive() {
        // `${{ ... }}` must be rejected and the scanner must advance to the
        // real JSON object on the next line.
        let response = r#"Looking at the diff, I see the new value `${{ github.ref_name }}` replacing the old bash expansion. Here's the commit:
{"emoji": "🔧", "title": "Upgrade AUR deploy action", "message": "Bump to v4.1.2 to fix bash --command error."}
"#;
        let extracted = extract_json_from_response(response).expect("should recover real JSON");
        let parsed: Value = serde_json::from_str(&extracted).expect("extracted value is JSON");
        assert_eq!(parsed["emoji"], "🔧");
        assert_eq!(parsed["title"], "Upgrade AUR deploy action");
    }
    #[test]
    fn extract_json_from_pure_json_response() {
        let response = r##"{"content": "# Heading\n\nBody text."}"##;
        let extracted = extract_json_from_response(response).expect("pure JSON passes through");
        assert_eq!(extracted, response);
    }
    #[test]
    fn extract_json_errors_when_no_candidate_parses() {
        let response = "prose ${{ template }} more prose";
        let err = extract_json_from_response(response).expect_err("should fail");
        let msg = err.to_string();
        assert!(
            msg.contains("Preview:"),
            "error should include a preview: {msg}"
        );
    }
    #[test]
    fn pr_review_emoji_styling_uses_a_compact_gitmoji_guide() {
        let mut prompt = String::new();
        IrisAgent::inject_pr_review_emoji_styling(&mut prompt);
        // The compact guide should include common codes but not the long tail.
        assert!(prompt.contains("Common gitmoji choices:"));
        assert!(prompt.contains("`:feat:`"));
        assert!(prompt.contains("`:fix:`"));
        assert!(!prompt.contains("`:accessibility:`"));
        assert!(!prompt.contains("`:analytics:`"));
    }
}