//! Parallel Analysis Tool
//!
//! Enables Iris to spawn multiple independent subagents that analyze different
//! portions of a codebase concurrently. This prevents context overflow when
//! dealing with large changesets by distributing work across separate context windows.
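//!
//! A rough usage sketch (illustrative only; the exact call site, model name, and
//! surrounding async context are assumptions, not part of this module's contract):
//!
//! ```ignore
//! use rig::tool::Tool;
//!
//! // Build the tool for a configured provider/model pair.
//! let analyzer = ParallelAnalyze::new("anthropic", "claude-3-5-haiku-latest", None)?;
//!
//! // Each task runs in its own subagent with an independent context window.
//! let output = analyzer
//!     .call(ParallelAnalyzeArgs {
//!         tasks: vec![
//!             "Analyze security-relevant changes under auth/".to_string(),
//!             "Review performance implications of changes under db/".to_string(),
//!         ],
//!     })
//!     .await?;
//!
//! println!("{}/{} tasks succeeded", output.successful, output.results.len());
//! ```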

use anyhow::Result;
use rig::{
    client::{CompletionClient, ProviderClient},
    completion::{Prompt, ToolDefinition},
    providers::{anthropic, gemini, openai},
    tool::Tool,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Mutex;

use crate::agents::debug as agent_debug;
use crate::agents::provider::{
    CompletionProfile, apply_completion_params, provider_from_name, resolve_api_key,
};
use crate::providers::Provider;

/// Default timeout for individual subagent tasks (2 minutes)
const DEFAULT_SUBAGENT_TIMEOUT_SECS: u64 = 120;

/// Arguments for parallel analysis
#[derive(Debug, Deserialize, JsonSchema)]
pub struct ParallelAnalyzeArgs {
    /// List of analysis tasks to run in parallel.
    /// Each task should be a focused prompt describing what to analyze.
    /// Example: `["Analyze security changes in auth/", "Review performance in db/"]`
    pub tasks: Vec<String>,
}

/// Result from a single subagent analysis
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct SubagentResult {
    /// The original task prompt
    pub task: String,
    /// The analysis result
    pub result: String,
    /// Whether the analysis succeeded
    pub success: bool,
    /// Error message if failed
    pub error: Option<String>,
}

/// Aggregated results from all parallel analyses
#[derive(Debug, Serialize, Deserialize)]
pub struct ParallelAnalyzeResult {
    /// Results from each subagent
    pub results: Vec<SubagentResult>,
    /// Number of successful analyses
    pub successful: usize,
    /// Number of failed analyses
    pub failed: usize,
    /// Total execution time in milliseconds
    pub execution_time_ms: u64,
}

/// Provider-specific subagent runner
#[derive(Clone)]
enum SubagentRunner {
    OpenAI {
        client: openai::Client,
        model: String,
        additional_params: HashMap<String, String>,
    },
    Anthropic {
        client: anthropic::Client,
        model: String,
        additional_params: HashMap<String, String>,
    },
    Gemini {
        client: gemini::Client,
        model: String,
        additional_params: HashMap<String, String>,
    },
}

impl SubagentRunner {
    fn new(
        provider: &str,
        model: &str,
        api_key: Option<&str>,
        additional_params: HashMap<String, String>,
    ) -> Result<Self> {
        match provider {
            "openai" => {
                let client = Self::resolve_openai_client(api_key)?;
                Ok(Self::OpenAI {
                    client,
                    model: model.to_string(),
                    additional_params,
                })
            }
            "anthropic" => {
                let client = Self::resolve_anthropic_client(api_key)?;
                Ok(Self::Anthropic {
                    client,
                    model: model.to_string(),
                    additional_params,
                })
            }
            "google" | "gemini" => {
                let client = Self::resolve_gemini_client(api_key)?;
                Ok(Self::Gemini {
                    client,
                    model: model.to_string(),
                    additional_params,
                })
            }
            _ => Err(anyhow::anyhow!(
                "Unsupported provider for parallel analysis: {}. Supported: openai, anthropic, google",
                provider
            )),
        }
    }

    /// Create `OpenAI` client using shared resolution logic
    ///
    /// Uses `resolve_api_key` from provider module to maintain consistent
    /// resolution order: config → env var → client default
    fn resolve_openai_client(api_key: Option<&str>) -> Result<openai::Client> {
        let (resolved_key, _source) = resolve_api_key(api_key, Provider::OpenAI);
        match resolved_key {
            Some(key) => openai::Client::new(&key)
                // Sanitize error to avoid exposing key material
                .map_err(|_| {
                    anyhow::anyhow!(
                        "Failed to create OpenAI client: authentication or configuration error"
                    )
                }),
            None => Ok(openai::Client::from_env()),
        }
    }

    /// Create `Anthropic` client using shared resolution logic
    ///
    /// Uses `resolve_api_key` from provider module to maintain consistent
    /// resolution order: config → env var → client default
    fn resolve_anthropic_client(api_key: Option<&str>) -> Result<anthropic::Client> {
        let (resolved_key, _source) = resolve_api_key(api_key, Provider::Anthropic);
        match resolved_key {
            Some(key) => anthropic::Client::new(&key)
                // Sanitize error to avoid exposing key material
                .map_err(|_| {
                    anyhow::anyhow!(
                        "Failed to create Anthropic client: authentication or configuration error"
                    )
                }),
            None => Ok(anthropic::Client::from_env()),
        }
    }

    /// Create `Gemini` client using shared resolution logic
    ///
    /// Uses `resolve_api_key` from provider module to maintain consistent
    /// resolution order: config → env var → client default
    fn resolve_gemini_client(api_key: Option<&str>) -> Result<gemini::Client> {
        let (resolved_key, _source) = resolve_api_key(api_key, Provider::Google);
        match resolved_key {
            Some(key) => gemini::Client::new(&key)
                // Sanitize error to avoid exposing key material
                .map_err(|_| {
                    anyhow::anyhow!(
                        "Failed to create Gemini client: authentication or configuration error"
                    )
                }),
            None => Ok(gemini::Client::from_env()),
        }
    }

    async fn run_task(&self, task: &str) -> SubagentResult {
        let preamble = "You are a specialized analysis sub-agent. Complete the assigned \
            task thoroughly and return a focused summary.\n\n\
            Guidelines:\n\
            - Use the available tools to gather necessary information\n\
            - Focus only on what's asked\n\
            - Return a clear, structured summary\n\
            - Be concise but comprehensive";

        // Use shared tool registry for consistent tool attachment
        let result = match self {
            Self::OpenAI {
                client,
                model,
                additional_params,
            } => {
                let builder = client.agent(model).preamble(preamble);
                let builder = apply_completion_params(
                    builder,
                    Provider::OpenAI,
                    model,
                    4096,
                    Some(additional_params),
                    CompletionProfile::Subagent,
                );
                let agent = crate::attach_core_tools!(builder).build();
                agent.prompt(task).await
            }
            Self::Anthropic {
                client,
                model,
                additional_params,
            } => {
                let builder = client.agent(model).preamble(preamble);
                let builder = apply_completion_params(
                    builder,
                    Provider::Anthropic,
                    model,
                    4096,
                    Some(additional_params),
                    CompletionProfile::Subagent,
                );
                let agent = crate::attach_core_tools!(builder).build();
                agent.prompt(task).await
            }
            Self::Gemini {
                client,
                model,
                additional_params,
            } => {
                let builder = client.agent(model).preamble(preamble);
                let builder = apply_completion_params(
                    builder,
                    Provider::Google,
                    model,
                    4096,
                    Some(additional_params),
                    CompletionProfile::Subagent,
                );
                let agent = crate::attach_core_tools!(builder).build();
                agent.prompt(task).await
            }
        };

        match result {
            Ok(response) => SubagentResult {
                task: task.to_string(),
                result: response,
                success: true,
                error: None,
            },
            Err(e) => SubagentResult {
                task: task.to_string(),
                result: String::new(),
                success: false,
                error: Some(e.to_string()),
            },
        }
    }
}

/// Parallel analysis tool
/// Spawns multiple subagents to analyze different aspects concurrently
pub struct ParallelAnalyze {
    runner: SubagentRunner,
    model: String,
    /// Timeout in seconds for each subagent task
    timeout_secs: u64,
}

impl ParallelAnalyze {
    /// Create a new parallel analyzer with default timeout
    ///
    /// # Errors
    ///
    /// Returns an error when the requested provider runner cannot be created.
    pub fn new(provider: &str, model: &str, api_key: Option<&str>) -> Result<Self> {
        Self::with_timeout(
            provider,
            model,
            DEFAULT_SUBAGENT_TIMEOUT_SECS,
            api_key,
            None,
        )
    }

    /// Create a new parallel analyzer with custom timeout
    ///
    /// # Errors
    ///
    /// Returns an error when the requested provider runner cannot be created.
    pub fn with_timeout(
        provider: &str,
        model: &str,
        timeout_secs: u64,
        api_key: Option<&str>,
        additional_params: Option<HashMap<String, String>>,
    ) -> Result<Self> {
        let provider_name = provider_from_name(provider)?;
        // Create runner for the requested provider - no silent fallback
        // If the user configures Anthropic, they should get Anthropic or a clear error
        let runner = SubagentRunner::new(
            provider_name.name(),
            model,
            api_key,
            additional_params.unwrap_or_default(),
        )
        .map_err(|e| {
            anyhow::anyhow!(
                "Failed to create {} runner: {}. Check API key and network connectivity.",
                provider,
                e
            )
        })?;

        Ok(Self {
            runner,
            model: model.to_string(),
            timeout_secs,
        })
    }
}

// Use standard tool error macro for consistency
crate::define_tool_error!(ParallelAnalyzeError);

impl Tool for ParallelAnalyze {
    const NAME: &'static str = "parallel_analyze";
    type Error = ParallelAnalyzeError;
    type Args = ParallelAnalyzeArgs;
    type Output = ParallelAnalyzeResult;

    async fn definition(&self, _prompt: String) -> ToolDefinition {
        ToolDefinition {
            name: Self::NAME.to_string(),
            description: "Run multiple analysis tasks in parallel using independent subagents. \
                         Each subagent has its own context window, preventing overflow when \
                         analyzing large changesets. Use this when you have multiple independent \
                         analysis tasks that can run concurrently.\n\n\
                         Best for:\n\
                         - Analyzing different directories/modules separately\n\
                         - Processing many commits in batches\n\
                         - Running different types of analysis (security, performance, style) in parallel\n\n\
                         Each task should be a focused prompt. Results are aggregated and returned."
                .to_string(),
            parameters: json!({
                "type": "object",
                "properties": {
                    "tasks": {
                        "type": "array",
                        "items": { "type": "string" },
                        "description": "List of analysis task prompts to run in parallel. Each task runs in its own subagent with independent context.",
                        "minItems": 1,
                        "maxItems": 10
                    }
                },
                "required": ["tasks"]
            }),
        }
    }

    #[allow(clippy::cognitive_complexity)]
    async fn call(&self, args: Self::Args) -> Result<Self::Output, Self::Error> {
        use std::time::Instant;

        let start = Instant::now();
        let tasks = args.tasks;
        let num_tasks = tasks.len();

        agent_debug::debug_context_management(
            "ParallelAnalyze",
            &format!(
                "Spawning {} subagents (fast model: {})",
                num_tasks, self.model
            ),
        );

        // Pre-allocate results vector to preserve task ordering
        let results: Arc<Mutex<Vec<Option<SubagentResult>>>> =
            Arc::new(Mutex::new(vec![None; num_tasks]));

        // Spawn all tasks as parallel tokio tasks, tracking index for ordering
        let mut handles = Vec::new();
        let timeout = Duration::from_secs(self.timeout_secs);
        for (index, task) in tasks.into_iter().enumerate() {
            let runner = self.runner.clone();
            let results = Arc::clone(&results);
            let task_timeout = timeout;
            let timeout_secs = self.timeout_secs;

            let handle = tokio::spawn(async move {
                // Wrap task execution in timeout to prevent hanging
                let result = match tokio::time::timeout(task_timeout, runner.run_task(&task)).await
                {
                    Ok(result) => result,
                    Err(_) => SubagentResult {
                        task: task.clone(),
                        result: String::new(),
                        success: false,
                        error: Some(format!("Task timed out after {} seconds", timeout_secs)),
                    },
                };

                // Store result at original index to preserve ordering
                let mut guard = results.lock().await;
                guard[index] = Some(result);
            });

            handles.push(handle);
        }

        // Wait for all tasks to complete
        for handle in handles {
            if let Err(e) = handle.await {
                agent_debug::debug_warning(&format!("Subagent task panicked: {}", e));
            }
        }

        #[allow(clippy::cast_possible_truncation, clippy::as_conversions)]
        let execution_time_ms = start.elapsed().as_millis().min(u128::from(u64::MAX)) as u64;

        // Extract results, preserving original task order
        let final_results: Vec<SubagentResult> = Arc::try_unwrap(results)
            .map_err(|_| ParallelAnalyzeError("Failed to unwrap results".to_string()))?
            .into_inner()
            .into_iter()
            .enumerate()
            .map(|(i, opt)| {
                opt.unwrap_or_else(|| SubagentResult {
                    task: format!("Task {}", i),
                    result: String::new(),
                    success: false,
                    error: Some("Task did not complete".to_string()),
                })
            })
            .collect();

        let successful = final_results.iter().filter(|r| r.success).count();
        let failed = final_results.iter().filter(|r| !r.success).count();

        agent_debug::debug_context_management(
            "ParallelAnalyze",
            &format!(
                "{}/{} successful in {}ms",
                successful, num_tasks, execution_time_ms
            ),
        );

        Ok(ParallelAnalyzeResult {
            results: final_results,
            successful,
            failed,
            execution_time_ms,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parallel_analyze_args_schema() {
        let schema = schemars::schema_for!(ParallelAnalyzeArgs);
        let json = serde_json::to_string_pretty(&schema).expect("schema should serialize");
        assert!(json.contains("tasks"));
    }
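
    #[test]
    fn test_parallel_analyze_args_deserialize() {
        // Illustrative addition (not part of the original suite): the accepted
        // args shape should mirror the "tasks" array advertised in the tool's
        // JSON schema in `definition`.
        let args: ParallelAnalyzeArgs = serde_json::from_value(json!({
            "tasks": ["Analyze auth/", "Review db/"]
        }))
        .expect("args should deserialize");
        assert_eq!(args.tasks.len(), 2);
    }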
}