//! Agent role definitions and structured-output parsing for the pipeline.
//! (oven_cli/agents/mod.rs)
// One submodule per agent role in the pipeline (see `AgentRole` below).
pub mod fixer;
pub mod implementer;
pub mod merger;
pub mod planner;
pub mod reviewer;
6
7use std::path::PathBuf;
8
9use anyhow::{Context, Result};
10use serde::{Deserialize, de::DeserializeOwned};
11
12use crate::{db::ReviewFinding, process::CommandRunner};
13
/// The five agent roles in the pipeline.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AgentRole {
    /// Plans work (read-only tool access; see `PlannerOutput`/`PlannerGraphOutput`).
    Planner,
    /// Implements changes (read/write/edit/bash tool access).
    Implementer,
    /// Reviews the implementation and emits findings (read-only; see `ReviewOutput`).
    Reviewer,
    /// Addresses or disputes review findings (read/write/edit/bash; see `FixerOutput`).
    Fixer,
    /// Merges the finished work (bash only).
    Merger,
}

impl AgentRole {
    /// The tool names this role is permitted to use.
    ///
    /// Read-only roles (planner, reviewer) get search/read tools; writing
    /// roles (implementer, fixer) additionally get write/edit/bash; the
    /// merger gets bash only.
    ///
    /// The lists are compile-time constants, so the returned data is
    /// `'static` and callers may hold it without borrowing the role value.
    pub const fn allowed_tools(&self) -> &'static [&'static str] {
        match self {
            Self::Planner | Self::Reviewer => &["Read", "Glob", "Grep"],
            Self::Implementer | Self::Fixer => &["Read", "Write", "Edit", "Glob", "Grep", "Bash"],
            Self::Merger => &["Bash"],
        }
    }

    /// Lowercase role name; the inverse of the `FromStr` impl.
    pub const fn as_str(&self) -> &'static str {
        match self {
            Self::Planner => "planner",
            Self::Implementer => "implementer",
            Self::Reviewer => "reviewer",
            Self::Fixer => "fixer",
            Self::Merger => "merger",
        }
    }

    /// `allowed_tools` as owned `String`s, the shape `run_claude` takes.
    pub fn tools_as_strings(&self) -> Vec<String> {
        self.allowed_tools().iter().map(|s| (*s).to_string()).collect()
    }
}

impl std::fmt::Display for AgentRole {
    /// Formats as the lowercase role name (same text as [`AgentRole::as_str`]).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}
53
54impl std::str::FromStr for AgentRole {
55    type Err = anyhow::Error;
56
57    fn from_str(s: &str) -> Result<Self, Self::Err> {
58        match s {
59            "planner" => Ok(Self::Planner),
60            "implementer" => Ok(Self::Implementer),
61            "reviewer" => Ok(Self::Reviewer),
62            "fixer" => Ok(Self::Fixer),
63            "merger" => Ok(Self::Merger),
64            other => anyhow::bail!("unknown agent role: {other}"),
65        }
66    }
67}
68
/// Context passed to agent prompt builders.
#[derive(Debug, Clone)]
pub struct AgentContext {
    /// Number of the issue being worked on.
    pub issue_number: u32,
    /// Issue title.
    pub issue_title: String,
    /// Full issue body text.
    pub issue_body: String,
    /// Name of the working branch for this issue.
    pub branch: String,
    /// Pull request number, once a PR exists.
    pub pr_number: Option<u32>,
    /// Project test command, if configured.
    pub test_command: Option<String>,
    /// Project lint command, if configured.
    pub lint_command: Option<String>,
    /// Findings from a previous review pass, if any.
    pub review_findings: Option<Vec<ReviewFinding>>,
    /// Review/fix cycle counter. NOTE(review): whether this is 0- or 1-based
    /// is not visible here -- confirm against the executor before relying on it.
    pub cycle: u32,
    /// When set, indicates this is a multi-repo pipeline where the PR lives in a
    /// different repo than the issue. The merger should skip closing the issue
    /// (the executor handles it).
    pub target_repo: Option<String>,
    /// Issue source: "github" or "local". The merger skips `gh issue close`
    /// for local issues since they're not on GitHub.
    pub issue_source: String,
    /// The default branch name (e.g. "main" or "master"). Used by the merger
    /// to diff against the correct base.
    pub base_branch: String,
}
92
93/// An invocation ready to be sent to the process runner.
94pub struct AgentInvocation {
95    pub role: AgentRole,
96    pub prompt: String,
97    pub working_dir: PathBuf,
98    pub max_turns: Option<u32>,
99}
100
101/// Invoke an agent via the command runner.
102pub async fn invoke_agent<R: CommandRunner>(
103    runner: &R,
104    invocation: &AgentInvocation,
105) -> Result<crate::process::AgentResult> {
106    runner
107        .run_claude(
108            &invocation.prompt,
109            &invocation.role.tools_as_strings(),
110            &invocation.working_dir,
111            invocation.max_turns,
112        )
113        .await
114}
115
/// Complexity classification from the planner agent.
///
/// Serialized as the lowercase strings `"simple"` / `"full"`; the
/// `Display`/`FromStr` impls below use the same spelling.
#[derive(Debug, Clone, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum Complexity {
    Simple,
    // Also the serde default when the planner omits the field (see `default_full`).
    Full,
}
123
124impl std::fmt::Display for Complexity {
125    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
126        f.write_str(match self {
127            Self::Simple => "simple",
128            Self::Full => "full",
129        })
130    }
131}
132
133impl std::str::FromStr for Complexity {
134    type Err = anyhow::Error;
135
136    fn from_str(s: &str) -> Result<Self, Self::Err> {
137        match s {
138            "simple" => Ok(Self::Simple),
139            "full" => Ok(Self::Full),
140            other => anyhow::bail!("unknown complexity: {other}"),
141        }
142    }
143}
144
/// Structured output from the planner agent (legacy batch format).
#[derive(Debug, Deserialize)]
pub struct PlannerOutput {
    /// Ordered batches of issues (the only required field).
    pub batches: Vec<Batch>,
    /// Issue count as reported by the planner (0 when omitted).
    #[serde(default)]
    pub total_issues: u32,
    /// Parallel capacity as reported by the planner (0 when omitted).
    #[serde(default)]
    pub parallel_capacity: u32,
}
154
/// One batch of issues in the legacy planner format. Batch order implies
/// dependencies; see `batches_to_graph_output`.
#[derive(Debug, Deserialize)]
pub struct Batch {
    /// Batch index as emitted by the planner.
    pub batch: u32,
    /// Issues in this batch.
    pub issues: Vec<PlannedIssue>,
    /// Planner's free-text rationale for the batch (empty when omitted).
    #[serde(default)]
    pub reasoning: String,
}
162
/// A single issue entry inside a legacy planner batch.
#[derive(Debug, Deserialize)]
pub struct PlannedIssue {
    /// Issue number (the only required field).
    pub number: u32,
    /// Issue title (empty when omitted).
    #[serde(default)]
    pub title: String,
    /// Code-area label assigned by the planner (empty when omitted).
    #[serde(default)]
    pub area: String,
    /// Files the planner predicts will change (empty when omitted).
    #[serde(default)]
    pub predicted_files: Vec<String>,
    /// Whether the planner expects a migration (false when omitted).
    #[serde(default)]
    pub has_migration: bool,
    /// Planner-assigned complexity; defaults to `Full` when omitted.
    #[serde(default = "default_full")]
    pub complexity: Complexity,
}
177
/// Serde default for `complexity` fields: unspecified issues are treated as
/// full complexity.
const fn default_full() -> Complexity {
    Complexity::Full
}
181
/// Structured output from the planner agent (DAG format with explicit dependencies).
#[derive(Debug, Deserialize)]
pub struct PlannerGraphOutput {
    /// Issue nodes with their dependency edges (the only required field).
    pub nodes: Vec<PlannedNode>,
    /// Issue count as reported by the planner (0 when omitted).
    #[serde(default)]
    pub total_issues: u32,
    /// Parallel capacity as reported by the planner (0 when omitted).
    #[serde(default)]
    pub parallel_capacity: u32,
}
191
/// A single issue node in the planner's DAG output.
#[derive(Debug, Deserialize)]
pub struct PlannedNode {
    /// Issue number (the only required field).
    pub number: u32,
    /// Issue title (empty when omitted).
    #[serde(default)]
    pub title: String,
    /// Code-area label assigned by the planner (empty when omitted).
    #[serde(default)]
    pub area: String,
    /// Files the planner predicts will change (empty when omitted).
    #[serde(default)]
    pub predicted_files: Vec<String>,
    /// Whether the planner expects a migration (false when omitted).
    #[serde(default)]
    pub has_migration: bool,
    /// Planner-assigned complexity; defaults to `Full` when omitted.
    #[serde(default = "default_full")]
    pub complexity: Complexity,
    /// Issue numbers this node depends on (empty when omitted).
    #[serde(default)]
    pub depends_on: Vec<u32>,
    /// Planner's free-text rationale (empty when omitted).
    #[serde(default)]
    pub reasoning: String,
}
211
/// Context passed to the planner about existing graph state.
#[derive(Debug, Clone)]
pub struct GraphContextNode {
    /// Issue number of the existing node.
    pub number: u32,
    /// Issue title.
    pub title: String,
    /// Current state of the node in the pipeline graph.
    pub state: crate::db::graph::NodeState,
    /// Code-area label.
    pub area: String,
    /// Files predicted to change.
    pub predicted_files: Vec<String>,
    /// Whether a migration is expected.
    pub has_migration: bool,
    /// Issue numbers this node depends on.
    pub depends_on: Vec<u32>,
    /// Target repo for multi-repo pipelines (see `AgentContext::target_repo`).
    pub target_repo: Option<String>,
}
224
/// Parse structured planner output (legacy batch format) from the planner's
/// text response. The JSON may be raw, fenced, or embedded in prose.
///
/// NOTE: this parses ONLY the legacy `PlannerOutput` batch format -- the
/// previous doc comment here described DAG-first behavior that actually
/// lives in [`parse_planner_graph_output`]. Returns `None` when no valid
/// JSON of that shape is found.
pub fn parse_planner_output(text: &str) -> Option<PlannerOutput> {
    extract_json(text)
}
232
233/// Parse planner output as a DAG. Tries the new format first, converts
234/// legacy batch format into equivalent DAG nodes if needed.
235pub fn parse_planner_graph_output(text: &str) -> Option<PlannerGraphOutput> {
236    // Try new DAG format first
237    if let Some(output) = extract_json::<PlannerGraphOutput>(text) {
238        return Some(output);
239    }
240
241    // Fall back to legacy batch format and convert
242    let legacy: PlannerOutput = extract_json(text)?;
243    Some(batches_to_graph_output(&legacy))
244}
245
246/// Convert a legacy batch-based planner output into a DAG output.
247///
248/// Issues in batch N+1 depend on all issues in batches 1..N (cumulative).
249fn batches_to_graph_output(legacy: &PlannerOutput) -> PlannerGraphOutput {
250    let mut nodes = Vec::new();
251    let mut prior_batch_issues: Vec<u32> = Vec::new();
252
253    for batch in &legacy.batches {
254        let depends_on = prior_batch_issues.clone();
255        for pi in &batch.issues {
256            nodes.push(PlannedNode {
257                number: pi.number,
258                title: pi.title.clone(),
259                area: pi.area.clone(),
260                predicted_files: pi.predicted_files.clone(),
261                has_migration: pi.has_migration,
262                complexity: pi.complexity.clone(),
263                depends_on: depends_on.clone(),
264                reasoning: batch.reasoning.clone(),
265            });
266        }
267        prior_batch_issues.extend(batch.issues.iter().map(|pi| pi.number));
268    }
269
270    PlannerGraphOutput {
271        total_issues: legacy.total_issues,
272        parallel_capacity: legacy.parallel_capacity,
273        nodes,
274    }
275}
276
/// Structured output from the reviewer agent.
#[derive(Debug, Deserialize)]
pub struct ReviewOutput {
    /// Individual findings; may be empty for a clean review.
    pub findings: Vec<Finding>,
    /// Free-text review summary (empty when omitted).
    #[serde(default)]
    pub summary: String,
}
284
/// A single issue raised by the reviewer.
#[derive(Debug, Deserialize)]
pub struct Finding {
    /// How serious the finding is.
    pub severity: Severity,
    /// Free-form category label (e.g. "bug", "style" in the tests below).
    pub category: String,
    /// File the finding refers to, when the reviewer names one.
    #[serde(default)]
    pub file_path: Option<String>,
    /// Line within `file_path`, when provided.
    #[serde(default)]
    pub line_number: Option<u32>,
    /// Human-readable description of the problem.
    pub message: String,
}
295
/// Severity level of a review finding.
///
/// Deserializes from the lowercase strings "critical" / "warning" / "info"
/// (see `as_str` for the reverse mapping).
#[derive(Debug, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum Severity {
    Critical,
    Warning,
    Info,
}
303
304impl Severity {
305    pub const fn as_str(&self) -> &str {
306        match self {
307            Self::Critical => "critical",
308            Self::Warning => "warning",
309            Self::Info => "info",
310        }
311    }
312}
313
314impl std::fmt::Display for Severity {
315    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
316        f.write_str(self.as_str())
317    }
318}
319
/// Structured output from the fixer agent.
///
/// `Default` (both lists empty) is the fallback when the fixer's response
/// contains no parseable JSON; see `parse_fixer_output`.
#[derive(Debug, Deserialize, Default)]
pub struct FixerOutput {
    /// Findings the fixer claims to have addressed (empty when omitted).
    #[serde(default)]
    pub addressed: Vec<FixerAction>,
    /// Findings the fixer disputes rather than fixes (empty when omitted).
    #[serde(default)]
    pub disputed: Vec<FixerDispute>,
}
328
/// One review finding the fixer addressed.
#[derive(Debug, Deserialize)]
pub struct FixerAction {
    /// 1-indexed finding number from the fixer prompt's `<review_findings>` list.
    pub finding: u32,
    /// Free-text description of what the fixer did.
    pub action: String,
}
335
/// One review finding the fixer disputes instead of fixing.
#[derive(Debug, Deserialize)]
pub struct FixerDispute {
    /// 1-indexed finding number from the fixer prompt's `<review_findings>` list.
    pub finding: u32,
    /// Free-text justification for the dispute.
    pub reason: String,
}
342
343/// Parse structured fixer output from the fixer's text response.
344///
345/// Returns a default (empty) `FixerOutput` if parsing fails. We don't want
346/// a fixer parse failure to block the pipeline, and this preserves backward
347/// compatibility with fixers that produce prose-only output.
348pub fn parse_fixer_output(text: &str) -> FixerOutput {
349    extract_json::<FixerOutput>(text).unwrap_or_default()
350}
351
352/// Parse structured review output from the reviewer's text response.
353///
354/// The JSON may be wrapped in markdown code fences. Returns an error if the
355/// output is unparseable -- callers should treat this as a review failure,
356/// not a clean pass (which could let unreviewed code through to merge).
357pub fn parse_review_output(text: &str) -> Result<ReviewOutput> {
358    extract_json(text).context("reviewer returned unparseable output (no valid JSON found)")
359}
360
361/// Try to extract a JSON object of type `T` from text that may contain prose,
362/// code fences, or raw JSON.
363///
364/// Attempts three strategies in order:
365/// 1. Direct `serde_json::from_str`
366/// 2. JSON inside markdown code fences
367/// 3. First `{` to last `}` in the text
368fn extract_json<T: DeserializeOwned>(text: &str) -> Option<T> {
369    if let Ok(val) = serde_json::from_str::<T>(text) {
370        return Some(val);
371    }
372
373    if let Some(json_str) = extract_json_from_fences(text) {
374        if let Ok(val) = serde_json::from_str::<T>(json_str) {
375            return Some(val);
376        }
377    }
378
379    let start = text.find('{')?;
380    let end = text.rfind('}')?;
381    if end > start { serde_json::from_str::<T>(&text[start..=end]).ok() } else { None }
382}
383
/// Return the contents of the first markdown code fence in `text`, if any.
///
/// Tries ```json openers (LF and CRLF) before bare ``` openers. The returned
/// slice runs from just after the opener to the closing ``` and so may carry
/// a trailing newline.
fn extract_json_from_fences(text: &str) -> Option<&str> {
    const OPENERS: [&str; 4] = ["```json\n", "```json\r\n", "```\n", "```\r\n"];
    OPENERS.iter().find_map(|opener| {
        let body_start = text.find(opener)? + opener.len();
        let body = &text[body_start..];
        let body_end = body.find("```")?;
        Some(&body[..body_end])
    })
}
396
#[cfg(test)]
mod tests {
    // Unit and property-based tests for role tool scoping and the
    // planner/reviewer/fixer output parsers.
    use proptest::prelude::*;

    use super::*;

    // Every role variant, used to drive exhaustive property tests.
    const ALL_ROLES: [AgentRole; 5] = [
        AgentRole::Planner,
        AgentRole::Implementer,
        AgentRole::Reviewer,
        AgentRole::Fixer,
        AgentRole::Merger,
    ];

    // Property tests: parsers must be panic-free on arbitrary input, and
    // well-formed JSON must always parse.
    proptest! {
        #[test]
        fn agent_role_display_fromstr_roundtrip(idx in 0..5usize) {
            let role = ALL_ROLES[idx];
            let s = role.to_string();
            let parsed: AgentRole = s.parse().unwrap();
            assert_eq!(role, parsed);
        }

        #[test]
        fn arbitrary_strings_never_panic_on_role_parse(s in "\\PC{1,50}") {
            let _ = s.parse::<AgentRole>();
        }

        #[test]
        fn parse_review_output_never_panics(text in "\\PC{0,500}") {
            // parse_review_output should never panic on any input (may return Err)
            let _ = parse_review_output(&text);
        }

        #[test]
        fn parse_fixer_output_never_panics(text in "\\PC{0,500}") {
            let _ = parse_fixer_output(&text);
        }

        #[test]
        fn valid_review_json_always_parses(
            severity in prop_oneof!["critical", "warning", "info"],
            category in "[a-z]{3,15}",
            message in "[a-zA-Z0-9 ]{1,50}",
        ) {
            let json = format!(
                r#"{{"findings":[{{"severity":"{severity}","category":"{category}","message":"{message}"}}],"summary":"test"}}"#
            );
            let output = parse_review_output(&json).unwrap();
            assert_eq!(output.findings.len(), 1);
            assert_eq!(output.findings[0].category, category);
        }

        #[test]
        fn review_json_in_fences_parses(
            severity in prop_oneof!["critical", "warning", "info"],
            category in "[a-z]{3,15}",
            message in "[a-zA-Z0-9 ]{1,50}",
            prefix in "[a-zA-Z ]{0,30}",
            suffix in "[a-zA-Z ]{0,30}",
        ) {
            let json = format!(
                r#"{{"findings":[{{"severity":"{severity}","category":"{category}","message":"{message}"}}],"summary":"ok"}}"#
            );
            let text = format!("{prefix}\n```json\n{json}\n```\n{suffix}");
            let output = parse_review_output(&text).unwrap();
            assert_eq!(output.findings.len(), 1);
        }
    }

    #[test]
    fn tool_scoping_per_role() {
        assert_eq!(AgentRole::Planner.allowed_tools(), &["Read", "Glob", "Grep"]);
        assert_eq!(
            AgentRole::Implementer.allowed_tools(),
            &["Read", "Write", "Edit", "Glob", "Grep", "Bash"]
        );
        assert_eq!(AgentRole::Reviewer.allowed_tools(), &["Read", "Glob", "Grep"]);
        assert_eq!(
            AgentRole::Fixer.allowed_tools(),
            &["Read", "Write", "Edit", "Glob", "Grep", "Bash"]
        );
        assert_eq!(AgentRole::Merger.allowed_tools(), &["Bash"]);
    }

    #[test]
    fn role_display_roundtrip() {
        let roles = [
            AgentRole::Planner,
            AgentRole::Implementer,
            AgentRole::Reviewer,
            AgentRole::Fixer,
            AgentRole::Merger,
        ];
        for role in roles {
            let s = role.to_string();
            let parsed: AgentRole = s.parse().unwrap();
            assert_eq!(role, parsed);
        }
    }

    #[test]
    fn parse_review_output_valid_json() {
        let json = r#"{"findings":[{"severity":"critical","category":"bug","file_path":"src/main.rs","line_number":10,"message":"null pointer"}],"summary":"one issue found"}"#;
        let output = parse_review_output(json).unwrap();
        assert_eq!(output.findings.len(), 1);
        assert_eq!(output.findings[0].severity, Severity::Critical);
        assert_eq!(output.findings[0].message, "null pointer");
        assert_eq!(output.summary, "one issue found");
    }

    #[test]
    fn parse_review_output_in_code_fences() {
        let text = r#"Here are my findings:

```json
{"findings":[{"severity":"warning","category":"style","message":"missing docs"}],"summary":"ok"}
```

That's it."#;
        let output = parse_review_output(text).unwrap();
        assert_eq!(output.findings.len(), 1);
        assert_eq!(output.findings[0].severity, Severity::Warning);
    }

    #[test]
    fn parse_review_output_embedded_json() {
        let text = r#"I reviewed the code and found: {"findings":[{"severity":"info","category":"note","message":"looks fine"}],"summary":"clean"} end of review"#;
        let output = parse_review_output(text).unwrap();
        assert_eq!(output.findings.len(), 1);
    }

    #[test]
    fn parse_review_output_no_json_returns_error() {
        let text = "The code looks great, no issues found.";
        let result = parse_review_output(text);
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("unparseable"));
    }

    #[test]
    fn parse_review_output_malformed_json_returns_error() {
        let text = r#"{"findings": [{"broken json"#;
        let result = parse_review_output(text);
        assert!(result.is_err());
    }

    // --- Planner output parsing tests ---

    #[test]
    fn parse_planner_output_valid_json() {
        let json = r#"{
            "batches": [{
                "batch": 1,
                "issues": [{
                    "number": 42,
                    "title": "Add login",
                    "area": "auth",
                    "predicted_files": ["src/auth.rs"],
                    "has_migration": false,
                    "complexity": "simple"
                }],
                "reasoning": "standalone issue"
            }],
            "total_issues": 1,
            "parallel_capacity": 1
        }"#;
        let output = parse_planner_output(json).unwrap();
        assert_eq!(output.batches.len(), 1);
        assert_eq!(output.batches[0].issues.len(), 1);
        assert_eq!(output.batches[0].issues[0].number, 42);
        assert_eq!(output.batches[0].issues[0].complexity, Complexity::Simple);
        assert!(!output.batches[0].issues[0].has_migration);
    }

    #[test]
    fn parse_planner_output_in_code_fences() {
        let text = r#"Here's the plan:

```json
{
    "batches": [{"batch": 1, "issues": [{"number": 1, "complexity": "full"}], "reasoning": "ok"}],
    "total_issues": 1,
    "parallel_capacity": 1
}
```

That's the plan."#;
        let output = parse_planner_output(text).unwrap();
        assert_eq!(output.batches.len(), 1);
        assert_eq!(output.batches[0].issues[0].complexity, Complexity::Full);
    }

    #[test]
    fn parse_planner_output_malformed_returns_none() {
        assert!(parse_planner_output("not json at all").is_none());
        assert!(parse_planner_output(r#"{"batches": "broken"}"#).is_none());
        assert!(parse_planner_output("").is_none());
    }

    #[test]
    fn complexity_deserializes_from_strings() {
        let simple: Complexity = serde_json::from_str(r#""simple""#).unwrap();
        assert_eq!(simple, Complexity::Simple);
        let full: Complexity = serde_json::from_str(r#""full""#).unwrap();
        assert_eq!(full, Complexity::Full);
    }

    #[test]
    fn complexity_display_roundtrip() {
        for c in [Complexity::Simple, Complexity::Full] {
            let s = c.to_string();
            let parsed: Complexity = s.parse().unwrap();
            assert_eq!(c, parsed);
        }
    }

    #[test]
    fn planner_output_defaults_complexity_to_full() {
        let json = r#"{"batches": [{"batch": 1, "issues": [{"number": 5}], "reasoning": ""}], "total_issues": 1, "parallel_capacity": 1}"#;
        let output = parse_planner_output(json).unwrap();
        assert_eq!(output.batches[0].issues[0].complexity, Complexity::Full);
    }

    #[test]
    fn planner_output_with_multiple_batches() {
        let json = r#"{
            "batches": [
                {"batch": 1, "issues": [{"number": 1, "complexity": "simple"}, {"number": 2, "complexity": "simple"}], "reasoning": "independent"},
                {"batch": 2, "issues": [{"number": 3, "complexity": "full"}], "reasoning": "depends on batch 1"}
            ],
            "total_issues": 3,
            "parallel_capacity": 2
        }"#;
        let output = parse_planner_output(json).unwrap();
        assert_eq!(output.batches.len(), 2);
        assert_eq!(output.batches[0].issues.len(), 2);
        assert_eq!(output.batches[1].issues.len(), 1);
        assert_eq!(output.total_issues, 3);
    }

    // --- DAG planner output parsing tests ---

    #[test]
    fn parse_graph_output_new_format() {
        let json = r#"{
            "nodes": [
                {"number": 1, "title": "A", "area": "cli", "depends_on": [], "complexity": "simple"},
                {"number": 2, "title": "B", "area": "db", "depends_on": [1], "complexity": "full"}
            ],
            "total_issues": 2,
            "parallel_capacity": 2
        }"#;
        let output = parse_planner_graph_output(json).unwrap();
        assert_eq!(output.nodes.len(), 2);
        assert!(output.nodes[0].depends_on.is_empty());
        assert_eq!(output.nodes[1].depends_on, vec![1]);
    }

    #[test]
    fn parse_graph_output_falls_back_to_batch_format() {
        let json = r#"{
            "batches": [
                {"batch": 1, "issues": [{"number": 1, "complexity": "simple"}, {"number": 2, "complexity": "simple"}], "reasoning": "ok"},
                {"batch": 2, "issues": [{"number": 3, "complexity": "full"}], "reasoning": "deps"}
            ],
            "total_issues": 3,
            "parallel_capacity": 2
        }"#;
        let output = parse_planner_graph_output(json).unwrap();
        assert_eq!(output.nodes.len(), 3);
        // Batch 1 issues have no dependencies
        assert!(output.nodes[0].depends_on.is_empty());
        assert!(output.nodes[1].depends_on.is_empty());
        // Batch 2 issue depends on batch 1 issues
        let mut deps = output.nodes[2].depends_on.clone();
        deps.sort_unstable();
        assert_eq!(deps, vec![1, 2]);
    }

    #[test]
    fn parse_graph_output_malformed_returns_none() {
        assert!(parse_planner_graph_output("garbage").is_none());
    }

    // Cumulative dependencies: each batch depends on all earlier batches.
    #[test]
    fn batches_to_graph_three_batches() {
        let legacy = PlannerOutput {
            batches: vec![
                Batch {
                    batch: 1,
                    issues: vec![PlannedIssue {
                        number: 1,
                        title: "A".into(),
                        area: "a".into(),
                        predicted_files: vec![],
                        has_migration: false,
                        complexity: Complexity::Simple,
                    }],
                    reasoning: String::new(),
                },
                Batch {
                    batch: 2,
                    issues: vec![PlannedIssue {
                        number: 2,
                        title: "B".into(),
                        area: "b".into(),
                        predicted_files: vec![],
                        has_migration: false,
                        complexity: Complexity::Full,
                    }],
                    reasoning: String::new(),
                },
                Batch {
                    batch: 3,
                    issues: vec![PlannedIssue {
                        number: 3,
                        title: "C".into(),
                        area: "c".into(),
                        predicted_files: vec![],
                        has_migration: false,
                        complexity: Complexity::Full,
                    }],
                    reasoning: String::new(),
                },
            ],
            total_issues: 3,
            parallel_capacity: 1,
        };

        let output = batches_to_graph_output(&legacy);
        assert_eq!(output.nodes.len(), 3);
        assert!(output.nodes[0].depends_on.is_empty()); // batch 1
        assert_eq!(output.nodes[1].depends_on, vec![1]); // batch 2
        let mut deps = output.nodes[2].depends_on.clone();
        deps.sort_unstable();
        assert_eq!(deps, vec![1, 2]); // batch 3
    }

    // --- Fixer output parsing tests ---

    #[test]
    fn parse_fixer_output_valid_json() {
        let json = r#"{"addressed":[{"finding":1,"action":"fixed"}],"disputed":[{"finding":2,"reason":"prop does not exist in v2"}]}"#;
        let output = parse_fixer_output(json);
        assert_eq!(output.addressed.len(), 1);
        assert_eq!(output.addressed[0].finding, 1);
        assert_eq!(output.disputed.len(), 1);
        assert_eq!(output.disputed[0].finding, 2);
        assert_eq!(output.disputed[0].reason, "prop does not exist in v2");
    }

    #[test]
    fn parse_fixer_output_in_code_fences() {
        let text = "I fixed everything.\n\n```json\n{\"addressed\":[{\"finding\":1,\"action\":\"added test\"}],\"disputed\":[]}\n```\n\nDone.";
        let output = parse_fixer_output(text);
        assert_eq!(output.addressed.len(), 1);
        assert!(output.disputed.is_empty());
    }

    #[test]
    fn parse_fixer_output_missing_disputed_defaults_empty() {
        let json = r#"{"addressed":[{"finding":1,"action":"fixed"}]}"#;
        let output = parse_fixer_output(json);
        assert_eq!(output.addressed.len(), 1);
        assert!(output.disputed.is_empty());
    }

    #[test]
    fn parse_fixer_output_missing_addressed_defaults_empty() {
        let json = r#"{"disputed":[{"finding":1,"reason":"API removed"}]}"#;
        let output = parse_fixer_output(json);
        assert!(output.addressed.is_empty());
        assert_eq!(output.disputed.len(), 1);
    }

    #[test]
    fn parse_fixer_output_garbage_returns_default() {
        let output = parse_fixer_output("This is just prose, no JSON here.");
        assert!(output.addressed.is_empty());
        assert!(output.disputed.is_empty());
    }

    #[test]
    fn parse_fixer_output_empty_returns_default() {
        let output = parse_fixer_output("");
        assert!(output.addressed.is_empty());
        assert!(output.disputed.is_empty());
    }
}
786}