// oven_cli/agents/mod.rs — agent roles, invocation plumbing, and structured
// output parsing for the planner/implementer/reviewer/fixer pipeline.

1pub mod fixer;
2pub mod implementer;
3pub mod planner;
4pub mod reviewer;
5
6use std::path::PathBuf;
7
8use anyhow::Result;
9use serde::{Deserialize, de::DeserializeOwned};
10
11use crate::{db::ReviewFinding, process::CommandRunner};
12
/// The four agent roles in the pipeline.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AgentRole {
    Planner,
    Implementer,
    Reviewer,
    Fixer,
}

impl AgentRole {
    /// Tools this role is permitted to use.
    ///
    /// Read-only roles (planner, reviewer) get inspection tools only; writing
    /// roles (implementer, fixer) additionally get edit and shell access.
    /// Returns `&'static` data so callers are not tied to the borrow of `self`.
    pub const fn allowed_tools(&self) -> &'static [&'static str] {
        match self {
            Self::Planner | Self::Reviewer => &["Read", "Glob", "Grep"],
            Self::Implementer | Self::Fixer => &["Read", "Write", "Edit", "Glob", "Grep", "Bash"],
        }
    }

    /// Stable lowercase identifier for this role (the same form `FromStr`
    /// accepts and `Display` emits).
    pub const fn as_str(&self) -> &'static str {
        match self {
            Self::Planner => "planner",
            Self::Implementer => "implementer",
            Self::Reviewer => "reviewer",
            Self::Fixer => "fixer",
        }
    }

    /// Owned copies of [`allowed_tools`](Self::allowed_tools), for APIs that
    /// need `Vec<String>`.
    pub fn tools_as_strings(&self) -> Vec<String> {
        self.allowed_tools().iter().copied().map(str::to_string).collect()
    }
}
43
44impl std::fmt::Display for AgentRole {
45    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
46        f.write_str(self.as_str())
47    }
48}
49
50impl std::str::FromStr for AgentRole {
51    type Err = anyhow::Error;
52
53    fn from_str(s: &str) -> Result<Self, Self::Err> {
54        match s {
55            "planner" => Ok(Self::Planner),
56            "implementer" => Ok(Self::Implementer),
57            "reviewer" => Ok(Self::Reviewer),
58            "fixer" => Ok(Self::Fixer),
59            other => anyhow::bail!("unknown agent role: {other}"),
60        }
61    }
62}
63
/// Context passed to agent prompt builders.
#[derive(Debug, Clone)]
pub struct AgentContext {
    /// Number of the issue being worked on.
    pub issue_number: u32,
    /// Issue title, verbatim.
    pub issue_title: String,
    /// Issue body text, verbatim.
    pub issue_body: String,
    /// Git branch the work happens on.
    pub branch: String,
    /// Pull request number, once one exists.
    pub pr_number: Option<u32>,
    /// Command to run the test suite, if configured.
    pub test_command: Option<String>,
    /// Command to run the linter, if configured.
    pub lint_command: Option<String>,
    /// Findings produced by an earlier review cycle, if any.
    pub review_findings: Option<Vec<ReviewFinding>>,
    /// Current review/fix cycle number.
    pub cycle: u32,
    /// When set, indicates this is a multi-repo pipeline where the PR lives in a
    /// different repo than the issue.
    pub target_repo: Option<String>,
    /// Issue source: "github" or "local".
    pub issue_source: String,
    /// The default branch name (e.g. "main" or "master").
    pub base_branch: String,
}
84
/// An invocation ready to be sent to the process runner.
pub struct AgentInvocation {
    /// Which agent role is being invoked (determines the allowed tool set).
    pub role: AgentRole,
    /// Full prompt text for the agent.
    pub prompt: String,
    /// Directory the agent process runs in.
    pub working_dir: PathBuf,
    /// Optional cap on agent turns, passed through to the runner.
    pub max_turns: Option<u32>,
    /// Optional model override, passed through to the runner.
    pub model: Option<String>,
}
93
94/// Invoke an agent via the command runner.
95pub async fn invoke_agent<R: CommandRunner>(
96    runner: &R,
97    invocation: &AgentInvocation,
98) -> Result<crate::process::AgentResult> {
99    runner
100        .run_claude(
101            &invocation.prompt,
102            &invocation.role.tools_as_strings(),
103            &invocation.working_dir,
104            invocation.max_turns,
105            invocation.model.clone(),
106        )
107        .await
108}
109
/// Complexity classification from the planner agent.
///
/// Deserializes from the lowercase strings "simple" / "full".
#[derive(Debug, Clone, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum Complexity {
    Simple,
    Full,
}
117
118impl std::fmt::Display for Complexity {
119    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
120        f.write_str(match self {
121            Self::Simple => "simple",
122            Self::Full => "full",
123        })
124    }
125}
126
127impl std::str::FromStr for Complexity {
128    type Err = anyhow::Error;
129
130    fn from_str(s: &str) -> Result<Self, Self::Err> {
131        match s {
132            "simple" => Ok(Self::Simple),
133            "full" => Ok(Self::Full),
134            other => anyhow::bail!("unknown complexity: {other}"),
135        }
136    }
137}
138
/// Structured output from the planner agent (legacy batch format).
#[derive(Debug, Deserialize)]
pub struct PlannerOutput {
    /// Ordered batches of issues; later batches are treated as depending on
    /// earlier ones when converted to a DAG.
    pub batches: Vec<Batch>,
    /// Total number of issues planned (0 if the planner omitted it).
    #[serde(default)]
    pub total_issues: u32,
    /// How many issues can run in parallel (0 if omitted).
    #[serde(default)]
    pub parallel_capacity: u32,
}
148
/// One batch of issues in the legacy planner output.
#[derive(Debug, Deserialize)]
pub struct Batch {
    /// Batch number as emitted by the planner.
    pub batch: u32,
    /// Issues grouped into this batch.
    pub issues: Vec<PlannedIssue>,
    /// Planner's explanation for the grouping (empty if omitted).
    #[serde(default)]
    pub reasoning: String,
}
156
/// A single issue within a planner batch.
#[derive(Debug, Deserialize)]
pub struct PlannedIssue {
    /// Issue number.
    pub number: u32,
    /// Issue title (empty if omitted).
    #[serde(default)]
    pub title: String,
    /// Code area label (empty if omitted).
    #[serde(default)]
    pub area: String,
    /// Files the planner predicts will change.
    #[serde(default)]
    pub predicted_files: Vec<String>,
    /// Whether the planner expects a migration.
    #[serde(default)]
    pub has_migration: bool,
    /// Defaults to `Complexity::Full` when the planner omits it.
    #[serde(default = "default_full")]
    pub complexity: Complexity,
}
171
/// Serde default for the `complexity` fields: an omitted complexity is
/// treated as `Full`.
const fn default_full() -> Complexity {
    Complexity::Full
}
175
/// Structured output from the planner agent (DAG format with explicit dependencies).
#[derive(Debug, Deserialize)]
pub struct PlannerGraphOutput {
    /// Issue nodes with explicit `depends_on` edges.
    pub nodes: Vec<PlannedNode>,
    /// Total number of issues planned (0 if omitted).
    #[serde(default)]
    pub total_issues: u32,
    /// How many issues can run in parallel (0 if omitted).
    #[serde(default)]
    pub parallel_capacity: u32,
}
185
/// A single issue node in the planner's DAG output.
#[derive(Debug, Deserialize)]
pub struct PlannedNode {
    /// Issue number.
    pub number: u32,
    /// Issue title (empty if omitted).
    #[serde(default)]
    pub title: String,
    /// Code area label (empty if omitted).
    #[serde(default)]
    pub area: String,
    /// Files the planner predicts will change.
    #[serde(default)]
    pub predicted_files: Vec<String>,
    /// Whether the planner expects a migration.
    #[serde(default)]
    pub has_migration: bool,
    /// Defaults to `Complexity::Full` when omitted.
    #[serde(default = "default_full")]
    pub complexity: Complexity,
    /// Issue numbers this node depends on (empty = no dependencies).
    #[serde(default)]
    pub depends_on: Vec<u32>,
    /// Planner's explanation for this node (empty if omitted).
    #[serde(default)]
    pub reasoning: String,
}
205
/// Context passed to the planner about existing graph state.
#[derive(Debug, Clone)]
pub struct GraphContextNode {
    /// Issue number.
    pub number: u32,
    /// Issue title.
    pub title: String,
    /// Current state of this node in the stored graph.
    pub state: crate::db::graph::NodeState,
    /// Code area label.
    pub area: String,
    /// Files predicted to change.
    pub predicted_files: Vec<String>,
    /// Whether a migration is expected.
    pub has_migration: bool,
    /// Issue numbers this node depends on.
    pub depends_on: Vec<u32>,
    /// Target repo when the PR lives in a different repo than the issue.
    pub target_repo: Option<String>,
}
218
/// Parse structured planner output (legacy batch format) from the planner's
/// text response.
///
/// NOTE: only the legacy `PlannerOutput` batch format is parsed here. The
/// DAG-first parse with legacy fallback lives in `parse_planner_graph_output`;
/// the previous doc comment described that function's behavior, not this one's.
pub fn parse_planner_output(text: &str) -> Option<PlannerOutput> {
    extract_json(text)
}
226
227/// Parse planner output as a DAG. Tries the new format first, converts
228/// legacy batch format into equivalent DAG nodes if needed.
229pub fn parse_planner_graph_output(text: &str) -> Option<PlannerGraphOutput> {
230    // Try new DAG format first
231    if let Some(output) = extract_json::<PlannerGraphOutput>(text) {
232        return Some(output);
233    }
234
235    // Fall back to legacy batch format and convert
236    let legacy: PlannerOutput = extract_json(text)?;
237    Some(batches_to_graph_output(&legacy))
238}
239
240/// Convert a legacy batch-based planner output into a DAG output.
241///
242/// Issues in batch N+1 depend on all issues in batches 1..N (cumulative).
243fn batches_to_graph_output(legacy: &PlannerOutput) -> PlannerGraphOutput {
244    let mut nodes = Vec::new();
245    let mut prior_batch_issues: Vec<u32> = Vec::new();
246
247    for batch in &legacy.batches {
248        let depends_on = prior_batch_issues.clone();
249        for pi in &batch.issues {
250            nodes.push(PlannedNode {
251                number: pi.number,
252                title: pi.title.clone(),
253                area: pi.area.clone(),
254                predicted_files: pi.predicted_files.clone(),
255                has_migration: pi.has_migration,
256                complexity: pi.complexity.clone(),
257                depends_on: depends_on.clone(),
258                reasoning: batch.reasoning.clone(),
259            });
260        }
261        prior_batch_issues.extend(batch.issues.iter().map(|pi| pi.number));
262    }
263
264    PlannerGraphOutput {
265        total_issues: legacy.total_issues,
266        parallel_capacity: legacy.parallel_capacity,
267        nodes,
268    }
269}
270
/// Structured output from the reviewer agent.
#[derive(Debug, Deserialize)]
pub struct ReviewOutput {
    /// Individual findings; may be empty for a clean review.
    pub findings: Vec<Finding>,
    /// Overall review summary (empty if the reviewer omitted it).
    #[serde(default)]
    pub summary: String,
}
278
/// A single review finding.
#[derive(Debug, Deserialize)]
pub struct Finding {
    /// How serious the finding is.
    pub severity: Severity,
    /// Free-form category label (e.g. "bug", "style").
    pub category: String,
    /// File the finding refers to, if the reviewer gave one.
    #[serde(default)]
    pub file_path: Option<String>,
    /// Line number within `file_path`, if given.
    #[serde(default)]
    pub line_number: Option<u32>,
    /// Human-readable description of the problem.
    pub message: String,
}
289
/// Severity of a review finding.
///
/// Deserializes from the lowercase strings "critical" / "warning" / "info".
#[derive(Debug, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum Severity {
    Critical,
    Warning,
    Info,
}
297
298impl Severity {
299    pub const fn as_str(&self) -> &str {
300        match self {
301            Self::Critical => "critical",
302            Self::Warning => "warning",
303            Self::Info => "info",
304        }
305    }
306}
307
308impl std::fmt::Display for Severity {
309    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
310        f.write_str(self.as_str())
311    }
312}
313
/// Structured output from the fixer agent.
#[derive(Debug, Deserialize, Default)]
pub struct FixerOutput {
    /// Findings the fixer addressed, with what it did for each.
    #[serde(default)]
    pub addressed: Vec<FixerAction>,
    /// Findings the fixer disputes rather than fixes.
    #[serde(default)]
    pub disputed: Vec<FixerDispute>,
}
322
/// One finding the fixer addressed.
#[derive(Debug, Deserialize)]
pub struct FixerAction {
    /// 1-indexed finding number from the fixer prompt's `<review_findings>` list.
    pub finding: u32,
    /// Description of what the fixer did about the finding.
    pub action: String,
}
329
/// One finding the fixer disputed instead of fixing.
#[derive(Debug, Deserialize)]
pub struct FixerDispute {
    /// 1-indexed finding number from the fixer prompt's `<review_findings>` list.
    pub finding: u32,
    /// The fixer's justification for not making a change.
    pub reason: String,
}
336
337/// Parse structured fixer output from the fixer's text response.
338///
339/// Returns a default (empty) `FixerOutput` if parsing fails. We don't want
340/// a fixer parse failure to block the pipeline, and this preserves backward
341/// compatibility with fixers that produce prose-only output.
342pub fn parse_fixer_output(text: &str) -> FixerOutput {
343    extract_json::<FixerOutput>(text).unwrap_or_default()
344}
345
346/// Parse structured review output from the reviewer's text response.
347///
348/// The JSON may be wrapped in markdown code fences. If the output is
349/// unparseable, returns a single warning finding instead of failing the
350/// pipeline. This ensures unreviewed code never silently passes through
351/// while still keeping the pipeline alive for human follow-up.
352pub fn parse_review_output(text: &str) -> ReviewOutput {
353    extract_json(text).unwrap_or_else(|| {
354        tracing::warn!("reviewer returned unparseable output, emitting warning finding");
355        ReviewOutput {
356            findings: vec![Finding {
357                severity: Severity::Warning,
358                category: "review-parse".to_string(),
359                file_path: None,
360                line_number: None,
361                message: "reviewer output was unparseable -- manual review recommended".to_string(),
362            }],
363            summary: "review output unparseable, manual review recommended".to_string(),
364        }
365    })
366}
367
368/// Try to extract a JSON object of type `T` from text that may contain prose,
369/// code fences, or raw JSON.
370///
371/// Attempts three strategies in order:
372/// 1. Direct `serde_json::from_str`
373/// 2. JSON inside markdown code fences
374/// 3. First `{` to last `}` in the text
375fn extract_json<T: DeserializeOwned>(text: &str) -> Option<T> {
376    if let Ok(val) = serde_json::from_str::<T>(text) {
377        return Some(val);
378    }
379
380    if let Some(json_str) = extract_json_from_fences(text) {
381        if let Ok(val) = serde_json::from_str::<T>(json_str) {
382            return Some(val);
383        }
384    }
385
386    let start = text.find('{')?;
387    let end = text.rfind('}')?;
388    if end > start { serde_json::from_str::<T>(&text[start..=end]).ok() } else { None }
389}
390
/// Return the span between the first code-fence opener and the next closing
/// fence, if both exist. `json`-tagged fences are checked before plain ones;
/// the extracted span keeps whatever whitespace the fence contained.
fn extract_json_from_fences(text: &str) -> Option<&str> {
    for marker in ["```json\n", "```json\r\n", "```\n", "```\r\n"] {
        if let Some(open) = text.find(marker) {
            let body = &text[open + marker.len()..];
            if let Some(close) = body.find("```") {
                return Some(&body[..close]);
            }
        }
    }
    None
}
403
#[cfg(test)]
mod tests {
    // Unit and property-based tests for role plumbing and for the
    // planner/reviewer/fixer output parsers.
    use proptest::prelude::*;

    use super::*;

    const ALL_ROLES: [AgentRole; 4] =
        [AgentRole::Planner, AgentRole::Implementer, AgentRole::Reviewer, AgentRole::Fixer];

    // --- Property-based tests ---
    proptest! {
        #[test]
        fn agent_role_display_fromstr_roundtrip(idx in 0..4usize) {
            let role = ALL_ROLES[idx];
            let s = role.to_string();
            let parsed: AgentRole = s.parse().unwrap();
            assert_eq!(role, parsed);
        }

        #[test]
        fn arbitrary_strings_never_panic_on_role_parse(s in "\\PC{1,50}") {
            let _ = s.parse::<AgentRole>();
        }

        #[test]
        fn parse_review_output_never_panics(text in "\\PC{0,500}") {
            // parse_review_output should never panic on any input and always
            // returns a valid ReviewOutput (either parsed or warning fallback)
            let _ = parse_review_output(&text);
        }

        #[test]
        fn parse_fixer_output_never_panics(text in "\\PC{0,500}") {
            let _ = parse_fixer_output(&text);
        }

        #[test]
        fn valid_review_json_always_parses(
            severity in prop_oneof!["critical", "warning", "info"],
            category in "[a-z]{3,15}",
            message in "[a-zA-Z0-9 ]{1,50}",
        ) {
            let json = format!(
                r#"{{"findings":[{{"severity":"{severity}","category":"{category}","message":"{message}"}}],"summary":"test"}}"#
            );
            let output = parse_review_output(&json);
            assert_eq!(output.findings.len(), 1);
            assert_eq!(output.findings[0].category, category);
        }

        #[test]
        fn review_json_in_fences_parses(
            severity in prop_oneof!["critical", "warning", "info"],
            category in "[a-z]{3,15}",
            message in "[a-zA-Z0-9 ]{1,50}",
            prefix in "[a-zA-Z ]{0,30}",
            suffix in "[a-zA-Z ]{0,30}",
        ) {
            let json = format!(
                r#"{{"findings":[{{"severity":"{severity}","category":"{category}","message":"{message}"}}],"summary":"ok"}}"#
            );
            let text = format!("{prefix}\n```json\n{json}\n```\n{suffix}");
            let output = parse_review_output(&text);
            assert_eq!(output.findings.len(), 1);
        }
    }

    // --- Role tool-scoping and round-trip tests ---

    #[test]
    fn tool_scoping_per_role() {
        assert_eq!(AgentRole::Planner.allowed_tools(), &["Read", "Glob", "Grep"]);
        assert_eq!(
            AgentRole::Implementer.allowed_tools(),
            &["Read", "Write", "Edit", "Glob", "Grep", "Bash"]
        );
        assert_eq!(AgentRole::Reviewer.allowed_tools(), &["Read", "Glob", "Grep"]);
        assert_eq!(
            AgentRole::Fixer.allowed_tools(),
            &["Read", "Write", "Edit", "Glob", "Grep", "Bash"]
        );
    }

    #[test]
    fn role_display_roundtrip() {
        let roles =
            [AgentRole::Planner, AgentRole::Implementer, AgentRole::Reviewer, AgentRole::Fixer];
        for role in roles {
            let s = role.to_string();
            let parsed: AgentRole = s.parse().unwrap();
            assert_eq!(role, parsed);
        }
    }

    // --- Review output parsing tests ---

    #[test]
    fn parse_review_output_valid_json() {
        let json = r#"{"findings":[{"severity":"critical","category":"bug","file_path":"src/main.rs","line_number":10,"message":"null pointer"}],"summary":"one issue found"}"#;
        let output = parse_review_output(json);
        assert_eq!(output.findings.len(), 1);
        assert_eq!(output.findings[0].severity, Severity::Critical);
        assert_eq!(output.findings[0].message, "null pointer");
        assert_eq!(output.summary, "one issue found");
    }

    #[test]
    fn parse_review_output_in_code_fences() {
        let text = r#"Here are my findings:

```json
{"findings":[{"severity":"warning","category":"style","message":"missing docs"}],"summary":"ok"}
```

That's it."#;
        let output = parse_review_output(text);
        assert_eq!(output.findings.len(), 1);
        assert_eq!(output.findings[0].severity, Severity::Warning);
    }

    #[test]
    fn parse_review_output_embedded_json() {
        let text = r#"I reviewed the code and found: {"findings":[{"severity":"info","category":"note","message":"looks fine"}],"summary":"clean"} end of review"#;
        let output = parse_review_output(text);
        assert_eq!(output.findings.len(), 1);
    }

    #[test]
    fn parse_review_output_no_json_returns_warning() {
        let text = "The code looks great, no issues found.";
        let output = parse_review_output(text);
        assert_eq!(output.findings.len(), 1);
        assert_eq!(output.findings[0].severity, Severity::Warning);
        assert_eq!(output.findings[0].category, "review-parse");
        assert!(output.summary.contains("unparseable"));
    }

    #[test]
    fn parse_review_output_malformed_json_returns_warning() {
        let text = r#"{"findings": [{"broken json"#;
        let output = parse_review_output(text);
        assert_eq!(output.findings.len(), 1);
        assert_eq!(output.findings[0].severity, Severity::Warning);
    }

    // --- Planner output parsing tests ---

    #[test]
    fn parse_planner_output_valid_json() {
        let json = r#"{
            "batches": [{
                "batch": 1,
                "issues": [{
                    "number": 42,
                    "title": "Add login",
                    "area": "auth",
                    "predicted_files": ["src/auth.rs"],
                    "has_migration": false,
                    "complexity": "simple"
                }],
                "reasoning": "standalone issue"
            }],
            "total_issues": 1,
            "parallel_capacity": 1
        }"#;
        let output = parse_planner_output(json).unwrap();
        assert_eq!(output.batches.len(), 1);
        assert_eq!(output.batches[0].issues.len(), 1);
        assert_eq!(output.batches[0].issues[0].number, 42);
        assert_eq!(output.batches[0].issues[0].complexity, Complexity::Simple);
        assert!(!output.batches[0].issues[0].has_migration);
    }

    #[test]
    fn parse_planner_output_in_code_fences() {
        let text = r#"Here's the plan:

```json
{
    "batches": [{"batch": 1, "issues": [{"number": 1, "complexity": "full"}], "reasoning": "ok"}],
    "total_issues": 1,
    "parallel_capacity": 1
}
```

That's the plan."#;
        let output = parse_planner_output(text).unwrap();
        assert_eq!(output.batches.len(), 1);
        assert_eq!(output.batches[0].issues[0].complexity, Complexity::Full);
    }

    #[test]
    fn parse_planner_output_malformed_returns_none() {
        assert!(parse_planner_output("not json at all").is_none());
        assert!(parse_planner_output(r#"{"batches": "broken"}"#).is_none());
        assert!(parse_planner_output("").is_none());
    }

    #[test]
    fn complexity_deserializes_from_strings() {
        let simple: Complexity = serde_json::from_str(r#""simple""#).unwrap();
        assert_eq!(simple, Complexity::Simple);
        let full: Complexity = serde_json::from_str(r#""full""#).unwrap();
        assert_eq!(full, Complexity::Full);
    }

    #[test]
    fn complexity_display_roundtrip() {
        for c in [Complexity::Simple, Complexity::Full] {
            let s = c.to_string();
            let parsed: Complexity = s.parse().unwrap();
            assert_eq!(c, parsed);
        }
    }

    #[test]
    fn planner_output_defaults_complexity_to_full() {
        let json = r#"{"batches": [{"batch": 1, "issues": [{"number": 5}], "reasoning": ""}], "total_issues": 1, "parallel_capacity": 1}"#;
        let output = parse_planner_output(json).unwrap();
        assert_eq!(output.batches[0].issues[0].complexity, Complexity::Full);
    }

    #[test]
    fn planner_output_with_multiple_batches() {
        let json = r#"{
            "batches": [
                {"batch": 1, "issues": [{"number": 1, "complexity": "simple"}, {"number": 2, "complexity": "simple"}], "reasoning": "independent"},
                {"batch": 2, "issues": [{"number": 3, "complexity": "full"}], "reasoning": "depends on batch 1"}
            ],
            "total_issues": 3,
            "parallel_capacity": 2
        }"#;
        let output = parse_planner_output(json).unwrap();
        assert_eq!(output.batches.len(), 2);
        assert_eq!(output.batches[0].issues.len(), 2);
        assert_eq!(output.batches[1].issues.len(), 1);
        assert_eq!(output.total_issues, 3);
    }

    // --- DAG planner output parsing tests ---

    #[test]
    fn parse_graph_output_new_format() {
        let json = r#"{
            "nodes": [
                {"number": 1, "title": "A", "area": "cli", "depends_on": [], "complexity": "simple"},
                {"number": 2, "title": "B", "area": "db", "depends_on": [1], "complexity": "full"}
            ],
            "total_issues": 2,
            "parallel_capacity": 2
        }"#;
        let output = parse_planner_graph_output(json).unwrap();
        assert_eq!(output.nodes.len(), 2);
        assert!(output.nodes[0].depends_on.is_empty());
        assert_eq!(output.nodes[1].depends_on, vec![1]);
    }

    #[test]
    fn parse_graph_output_falls_back_to_batch_format() {
        let json = r#"{
            "batches": [
                {"batch": 1, "issues": [{"number": 1, "complexity": "simple"}, {"number": 2, "complexity": "simple"}], "reasoning": "ok"},
                {"batch": 2, "issues": [{"number": 3, "complexity": "full"}], "reasoning": "deps"}
            ],
            "total_issues": 3,
            "parallel_capacity": 2
        }"#;
        let output = parse_planner_graph_output(json).unwrap();
        assert_eq!(output.nodes.len(), 3);
        // Batch 1 issues have no dependencies
        assert!(output.nodes[0].depends_on.is_empty());
        assert!(output.nodes[1].depends_on.is_empty());
        // Batch 2 issue depends on batch 1 issues
        let mut deps = output.nodes[2].depends_on.clone();
        deps.sort_unstable();
        assert_eq!(deps, vec![1, 2]);
    }

    #[test]
    fn parse_graph_output_malformed_returns_none() {
        assert!(parse_planner_graph_output("garbage").is_none());
    }

    #[test]
    fn batches_to_graph_three_batches() {
        // Three sequential batches should produce cumulative dependencies.
        let legacy = PlannerOutput {
            batches: vec![
                Batch {
                    batch: 1,
                    issues: vec![PlannedIssue {
                        number: 1,
                        title: "A".into(),
                        area: "a".into(),
                        predicted_files: vec![],
                        has_migration: false,
                        complexity: Complexity::Simple,
                    }],
                    reasoning: String::new(),
                },
                Batch {
                    batch: 2,
                    issues: vec![PlannedIssue {
                        number: 2,
                        title: "B".into(),
                        area: "b".into(),
                        predicted_files: vec![],
                        has_migration: false,
                        complexity: Complexity::Full,
                    }],
                    reasoning: String::new(),
                },
                Batch {
                    batch: 3,
                    issues: vec![PlannedIssue {
                        number: 3,
                        title: "C".into(),
                        area: "c".into(),
                        predicted_files: vec![],
                        has_migration: false,
                        complexity: Complexity::Full,
                    }],
                    reasoning: String::new(),
                },
            ],
            total_issues: 3,
            parallel_capacity: 1,
        };

        let output = batches_to_graph_output(&legacy);
        assert_eq!(output.nodes.len(), 3);
        assert!(output.nodes[0].depends_on.is_empty()); // batch 1
        assert_eq!(output.nodes[1].depends_on, vec![1]); // batch 2
        let mut deps = output.nodes[2].depends_on.clone();
        deps.sort_unstable();
        assert_eq!(deps, vec![1, 2]); // batch 3
    }

    // --- Fixer output parsing tests ---

    #[test]
    fn parse_fixer_output_valid_json() {
        let json = r#"{"addressed":[{"finding":1,"action":"fixed"}],"disputed":[{"finding":2,"reason":"prop does not exist in v2"}]}"#;
        let output = parse_fixer_output(json);
        assert_eq!(output.addressed.len(), 1);
        assert_eq!(output.addressed[0].finding, 1);
        assert_eq!(output.disputed.len(), 1);
        assert_eq!(output.disputed[0].finding, 2);
        assert_eq!(output.disputed[0].reason, "prop does not exist in v2");
    }

    #[test]
    fn parse_fixer_output_in_code_fences() {
        let text = "I fixed everything.\n\n```json\n{\"addressed\":[{\"finding\":1,\"action\":\"added test\"}],\"disputed\":[]}\n```\n\nDone.";
        let output = parse_fixer_output(text);
        assert_eq!(output.addressed.len(), 1);
        assert!(output.disputed.is_empty());
    }

    #[test]
    fn parse_fixer_output_missing_disputed_defaults_empty() {
        let json = r#"{"addressed":[{"finding":1,"action":"fixed"}]}"#;
        let output = parse_fixer_output(json);
        assert_eq!(output.addressed.len(), 1);
        assert!(output.disputed.is_empty());
    }

    #[test]
    fn parse_fixer_output_missing_addressed_defaults_empty() {
        let json = r#"{"disputed":[{"finding":1,"reason":"API removed"}]}"#;
        let output = parse_fixer_output(json);
        assert!(output.addressed.is_empty());
        assert_eq!(output.disputed.len(), 1);
    }

    #[test]
    fn parse_fixer_output_garbage_returns_default() {
        let output = parse_fixer_output("This is just prose, no JSON here.");
        assert!(output.addressed.is_empty());
        assert!(output.disputed.is_empty());
    }

    #[test]
    fn parse_fixer_output_empty_returns_default() {
        let output = parse_fixer_output("");
        assert!(output.addressed.is_empty());
        assert!(output.disputed.is_empty());
    }
}